[llvm] 365f840 - [RISCV] Enable subregister liveness by default

Piyou Chen via llvm-commits llvm-commits at lists.llvm.org
Wed Mar 8 23:15:56 PST 2023


Author: Piyou Chen
Date: 2023-03-08T23:03:35-08:00
New Revision: 365f840398782dec07314eb710daf90b97ea53de

URL: https://github.com/llvm/llvm-project/commit/365f840398782dec07314eb710daf90b97ea53de
DIFF: https://github.com/llvm/llvm-project/commit/365f840398782dec07314eb710daf90b97ea53de.diff

LOG: [RISCV] Enable subregister liveness by default

This commit enables subregister liveness by default for RISC-V.

Subregister liveness was previously disabled in https://reviews.llvm.org/D129646 after an earlier attempt to enable it in https://reviews.llvm.org/D128016.

We believe that https://reviews.llvm.org/D129735 fixes the issue that caused it to be disabled.
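
For anyone who needs to fall back to the previous behavior, the hidden cl::opt changed in this patch can still turn it off. A sketch of an llc invocation (the -mtriple/-mattr values and the input file name are illustrative, not taken from this commit):

    llc -mtriple=riscv64 -mattr=+v -riscv-enable-subreg-liveness=false input.ll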

Reviewed By: craig.topper, kito-cheng

Differential Revision: https://reviews.llvm.org/D145546

Added: 
    

Modified: 
    llvm/lib/Target/RISCV/RISCVSubtarget.cpp
    llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
    llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-load.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-store.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
    llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
    llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
    llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
    llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
    llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
    llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
    llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
    llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
    llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
    llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
    llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
    llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll
    llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll
    llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
    llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
    llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
    llvm/test/CodeGen/RISCV/shuffle-reverse.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
index a6700fa3f4ea7..47b47271cc9be 100644
--- a/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
+++ b/llvm/lib/Target/RISCV/RISCVSubtarget.cpp
@@ -30,7 +30,7 @@ using namespace llvm;
 #include "RISCVGenSubtargetInfo.inc"
 
 static cl::opt<bool> EnableSubRegLiveness("riscv-enable-subreg-liveness",
-                                          cl::init(false), cl::Hidden);
+                                          cl::init(true), cl::Hidden);
 
 static cl::opt<unsigned> RVVVectorLMULMax(
     "riscv-v-fixed-length-vector-lmul-max",

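Not part of this diff: the register allocator does not read this cl::opt directly; RISCVSubtarget surfaces it through the generic TargetSubtargetInfo hook, roughly as sketched below (the exact in-tree wording may differ):

    // Sketch of the accessor that forwards the flag to the rest of the
    // backend; subregister liveness tracking is queried via
    // TargetSubtargetInfo::enableSubRegLiveness().
    bool RISCVSubtarget::enableSubRegLiveness() const {
      return EnableSubRegLiveness;
    }
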
diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
index bbf9ac8e92fde..de3bbf360a360 100644
--- a/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat_vec.ll
@@ -476,24 +476,24 @@ define <4 x i32> @stest_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    sd a0, 8(sp)
 ; CHECK-V-NEXT:    addi a0, sp, 24
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
-; CHECK-V-NEXT:    mv a0, sp
 ; CHECK-V-NEXT:    vle64.v v10, (a0)
+; CHECK-V-NEXT:    mv a0, sp
+; CHECK-V-NEXT:    vle64.v v8, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
+; CHECK-V-NEXT:    vle64.v v10, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 2
 ; CHECK-V-NEXT:    addi a0, sp, 8
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
+; CHECK-V-NEXT:    vle64.v v10, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-V-NEXT:    lui a0, 524288
 ; CHECK-V-NEXT:    addiw a1, a0, -1
-; CHECK-V-NEXT:    vmin.vx v8, v10, a1
+; CHECK-V-NEXT:    vmin.vx v8, v8, a1
 ; CHECK-V-NEXT:    vmax.vx v10, v8, a0
 ; CHECK-V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-V-NEXT:    vnsrl.wi v8, v10, 0
@@ -626,24 +626,24 @@ define <4 x i32> @utesth_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    sd a0, 8(sp)
 ; CHECK-V-NEXT:    addi a0, sp, 24
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
-; CHECK-V-NEXT:    mv a0, sp
 ; CHECK-V-NEXT:    vle64.v v10, (a0)
+; CHECK-V-NEXT:    mv a0, sp
+; CHECK-V-NEXT:    vle64.v v8, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
+; CHECK-V-NEXT:    vle64.v v10, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 2
 ; CHECK-V-NEXT:    addi a0, sp, 8
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
+; CHECK-V-NEXT:    vle64.v v10, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-V-NEXT:    li a0, -1
 ; CHECK-V-NEXT:    srli a0, a0, 32
-; CHECK-V-NEXT:    vminu.vx v10, v10, a0
+; CHECK-V-NEXT:    vminu.vx v10, v8, a0
 ; CHECK-V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-V-NEXT:    vnsrl.wi v8, v10, 0
 ; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
@@ -785,24 +785,24 @@ define <4 x i32> @ustest_f16i32(<4 x half> %x) {
 ; CHECK-V-NEXT:    sd a0, 8(sp)
 ; CHECK-V-NEXT:    addi a0, sp, 24
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
-; CHECK-V-NEXT:    mv a0, sp
 ; CHECK-V-NEXT:    vle64.v v10, (a0)
+; CHECK-V-NEXT:    mv a0, sp
+; CHECK-V-NEXT:    vle64.v v8, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
+; CHECK-V-NEXT:    vle64.v v10, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 2
 ; CHECK-V-NEXT:    addi a0, sp, 8
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
+; CHECK-V-NEXT:    vle64.v v10, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-V-NEXT:    li a0, -1
 ; CHECK-V-NEXT:    srli a0, a0, 32
-; CHECK-V-NEXT:    vmin.vx v8, v10, a0
+; CHECK-V-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-V-NEXT:    vmax.vx v10, v8, zero
 ; CHECK-V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-V-NEXT:    vnsrl.wi v8, v10, 0
@@ -3763,24 +3763,24 @@ define <4 x i32> @stest_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    sd a0, 8(sp)
 ; CHECK-V-NEXT:    addi a0, sp, 24
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
-; CHECK-V-NEXT:    mv a0, sp
 ; CHECK-V-NEXT:    vle64.v v10, (a0)
+; CHECK-V-NEXT:    mv a0, sp
+; CHECK-V-NEXT:    vle64.v v8, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
+; CHECK-V-NEXT:    vle64.v v10, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 2
 ; CHECK-V-NEXT:    addi a0, sp, 8
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
+; CHECK-V-NEXT:    vle64.v v10, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-V-NEXT:    lui a0, 524288
 ; CHECK-V-NEXT:    addiw a1, a0, -1
-; CHECK-V-NEXT:    vmin.vx v8, v10, a1
+; CHECK-V-NEXT:    vmin.vx v8, v8, a1
 ; CHECK-V-NEXT:    vmax.vx v10, v8, a0
 ; CHECK-V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-V-NEXT:    vnsrl.wi v8, v10, 0
@@ -3911,24 +3911,24 @@ define <4 x i32> @utesth_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    sd a0, 8(sp)
 ; CHECK-V-NEXT:    addi a0, sp, 24
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
-; CHECK-V-NEXT:    mv a0, sp
 ; CHECK-V-NEXT:    vle64.v v10, (a0)
+; CHECK-V-NEXT:    mv a0, sp
+; CHECK-V-NEXT:    vle64.v v8, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
+; CHECK-V-NEXT:    vle64.v v10, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 2
 ; CHECK-V-NEXT:    addi a0, sp, 8
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
+; CHECK-V-NEXT:    vle64.v v10, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-V-NEXT:    li a0, -1
 ; CHECK-V-NEXT:    srli a0, a0, 32
-; CHECK-V-NEXT:    vminu.vx v10, v10, a0
+; CHECK-V-NEXT:    vminu.vx v10, v8, a0
 ; CHECK-V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-V-NEXT:    vnsrl.wi v8, v10, 0
 ; CHECK-V-NEXT:    ld ra, 56(sp) # 8-byte Folded Reload
@@ -4069,24 +4069,24 @@ define <4 x i32> @ustest_f16i32_mm(<4 x half> %x) {
 ; CHECK-V-NEXT:    sd a0, 8(sp)
 ; CHECK-V-NEXT:    addi a0, sp, 24
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
-; CHECK-V-NEXT:    mv a0, sp
 ; CHECK-V-NEXT:    vle64.v v10, (a0)
+; CHECK-V-NEXT:    mv a0, sp
+; CHECK-V-NEXT:    vle64.v v8, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 1
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 1
 ; CHECK-V-NEXT:    addi a0, sp, 16
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
+; CHECK-V-NEXT:    vle64.v v10, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 3, e64, m2, tu, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 2
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 2
 ; CHECK-V-NEXT:    addi a0, sp, 8
 ; CHECK-V-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-V-NEXT:    vle64.v v8, (a0)
+; CHECK-V-NEXT:    vle64.v v10, (a0)
 ; CHECK-V-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
-; CHECK-V-NEXT:    vslideup.vi v10, v8, 3
+; CHECK-V-NEXT:    vslideup.vi v8, v10, 3
 ; CHECK-V-NEXT:    li a0, -1
 ; CHECK-V-NEXT:    srli a0, a0, 32
-; CHECK-V-NEXT:    vmin.vx v8, v10, a0
+; CHECK-V-NEXT:    vmin.vx v8, v8, a0
 ; CHECK-V-NEXT:    vmax.vx v10, v8, zero
 ; CHECK-V-NEXT:    vsetvli zero, zero, e32, m1, ta, ma
 ; CHECK-V-NEXT:    vnsrl.wi v8, v10, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
index 53995fe70704d..8dfb472f1f805 100644
--- a/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/extract-subvector.ll
@@ -5,7 +5,6 @@
 define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_0(<vscale x 8 x i32> %vec) {
 ; CHECK-LABEL: extract_nxv8i32_nxv4i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m4
 ; CHECK-NEXT:    ret
   %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
   ret <vscale x 4 x i32> %c
@@ -23,7 +22,6 @@ define <vscale x 4 x i32> @extract_nxv8i32_nxv4i32_4(<vscale x 8 x i32> %vec) {
 define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_0(<vscale x 8 x i32> %vec) {
 ; CHECK-LABEL: extract_nxv8i32_nxv2i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8m4
 ; CHECK-NEXT:    ret
   %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv8i32(<vscale x 8 x i32> %vec, i64 0)
   ret <vscale x 2 x i32> %c
@@ -59,7 +57,6 @@ define <vscale x 2 x i32> @extract_nxv8i32_nxv2i32_6(<vscale x 8 x i32> %vec) {
 define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_0(<vscale x 16 x i32> %vec) {
 ; CHECK-LABEL: extract_nxv16i32_nxv8i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m8
 ; CHECK-NEXT:    ret
   %c = call <vscale x 8 x i32> @llvm.vector.extract.nxv8i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
   ret <vscale x 8 x i32> %c
@@ -77,7 +74,6 @@ define <vscale x 8 x i32> @extract_nxv16i32_nxv8i32_8(<vscale x 16 x i32> %vec)
 define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_0(<vscale x 16 x i32> %vec) {
 ; CHECK-LABEL: extract_nxv16i32_nxv4i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m8
 ; CHECK-NEXT:    ret
   %c = call <vscale x 4 x i32> @llvm.vector.extract.nxv4i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
   ret <vscale x 4 x i32> %c
@@ -113,7 +109,6 @@ define <vscale x 4 x i32> @extract_nxv16i32_nxv4i32_12(<vscale x 16 x i32> %vec)
 define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_0(<vscale x 16 x i32> %vec) {
 ; CHECK-LABEL: extract_nxv16i32_nxv2i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8m8
 ; CHECK-NEXT:    ret
   %c = call <vscale x 2 x i32> @llvm.vector.extract.nxv2i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
   ret <vscale x 2 x i32> %c
@@ -185,7 +180,6 @@ define <vscale x 2 x i32> @extract_nxv16i32_nxv2i32_14(<vscale x 16 x i32> %vec)
 define <vscale x 1 x i32> @extract_nxv16i32_nxv1i32_0(<vscale x 16 x i32> %vec) {
 ; CHECK-LABEL: extract_nxv16i32_nxv1i32_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8m8
 ; CHECK-NEXT:    ret
   %c = call <vscale x 1 x i32> @llvm.vector.extract.nxv1i32.nxv16i32(<vscale x 16 x i32> %vec, i64 0)
   ret <vscale x 1 x i32> %c
@@ -247,7 +241,6 @@ define <vscale x 1 x i32> @extract_nxv2i32_nxv1i32_0(<vscale x 2 x i32> %vec) {
 define <vscale x 2 x i8> @extract_nxv32i8_nxv2i8_0(<vscale x 32 x i8> %vec) {
 ; CHECK-LABEL: extract_nxv32i8_nxv2i8_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8m4
 ; CHECK-NEXT:    ret
   %c = call <vscale x 2 x i8> @llvm.vector.extract.nxv2i8.nxv32i8(<vscale x 32 x i8> %vec, i64 0)
   ret <vscale x 2 x i8> %c
@@ -344,7 +337,6 @@ define <vscale x 1 x i8> @extract_nxv4i8_nxv1i8_3(<vscale x 4 x i8> %vec) {
 define <vscale x 2 x half> @extract_nxv2f16_nxv16f16_0(<vscale x 16 x half> %vec) {
 ; CHECK-LABEL: extract_nxv2f16_nxv16f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8m4
 ; CHECK-NEXT:    ret
   %c = call <vscale x 2 x half> @llvm.vector.extract.nxv2f16.nxv16f16(<vscale x 16 x half> %vec, i64 0)
   ret <vscale x 2 x half> %c
@@ -467,7 +459,6 @@ define <vscale x 16 x i1> @extract_nxv16i1_nxv32i1_16(<vscale x 32 x i1> %x) {
 define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_0(<vscale x 12 x half> %in) {
 ; CHECK-LABEL: extract_nxv6f16_nxv12f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m4
 ; CHECK-NEXT:    ret
   %res = call <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 0)
   ret <vscale x 6 x half> %res
@@ -479,14 +470,13 @@ define <vscale x 6 x half> @extract_nxv6f16_nxv12f16_6(<vscale x 12 x half> %in)
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vx v14, v10, a0
-; CHECK-NEXT:    vslidedown.vx v12, v9, a0
+; CHECK-NEXT:    vslidedown.vx v11, v10, a0
+; CHECK-NEXT:    vslidedown.vx v8, v9, a0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, ma
-; CHECK-NEXT:    vslideup.vi v13, v14, 0
+; CHECK-NEXT:    vslideup.vi v9, v11, 0
 ; CHECK-NEXT:    add a1, a0, a0
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, ma
-; CHECK-NEXT:    vslideup.vx v12, v10, a0
-; CHECK-NEXT:    vmv2r.v v8, v12
+; CHECK-NEXT:    vslideup.vx v8, v10, a0
 ; CHECK-NEXT:    ret
   %res = call <vscale x 6 x half> @llvm.vector.extract.nxv6f16.nxv12f16(<vscale x 12 x half> %in, i64 6)
   ret <vscale x 6 x half> %res

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
index 04d394a02188a..a31197366b9a7 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-fptrunc-vp.ll
@@ -94,15 +94,7 @@ declare <32 x float> @llvm.vp.fptrunc.v32f64.v32f32(<32 x double>, <32 x i1>, i3
 define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vfptrunc_v32f32_v32f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    vmv1r.v v28, v0
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v0, v0, 2
 ; CHECK-NEXT:    addi a1, a0, -16
@@ -111,24 +103,18 @@ define <32 x float> @vfptrunc_v32f32_v32f64(<32 x double> %a, <32 x i1> %m, i32
 ; CHECK-NEXT:    and a1, a2, a1
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    li a1, 16
-; CHECK-NEXT:    vfncvt.f.f.w v8, v16, v0.t
+; CHECK-NEXT:    vfncvt.f.f.w v24, v16, v0.t
 ; CHECK-NEXT:    bltu a0, a1, .LBB7_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:  .LBB7_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vfncvt.f.f.w v16, v24, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v28
+; CHECK-NEXT:    vfncvt.f.f.w v16, v8, v0.t
 ; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
-; CHECK-NEXT:    vslideup.vi v16, v8, 16
+; CHECK-NEXT:    vslideup.vi v16, v24, 16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
   %v = call <32 x float> @llvm.vp.fptrunc.v32f64.v32f32(<32 x double> %a, <32 x i1> %m, i32 %vl)
   ret <32 x float> %v

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-load.ll
index 15d65a2a6aaa2..03b98412ce1b2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-load.ll
@@ -7,7 +7,6 @@ define <8 x i8> @load_factor2(ptr %ptr) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
     %1 = call { <8 x i8>, <8 x i8> } @llvm.riscv.seg2.load.v8i8.p0.i64(ptr %ptr, i64 8)
     %2 = extractvalue { <8 x i8>, <8 x i8> } %1, 0
@@ -20,7 +19,6 @@ define <8 x i8> @load_factor3(ptr %ptr) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg3e8.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v6_v7_v8
 ; CHECK-NEXT:    ret
     %1 = call { <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg3.load.v8i8.p0.i64(ptr %ptr, i64 8)
     %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
@@ -34,7 +32,6 @@ define <8 x i8> @load_factor4(ptr %ptr) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg4e8.v v5, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v5_v6_v7_v8
 ; CHECK-NEXT:    ret
     %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg4.load.v8i8.p0.i64(ptr %ptr, i64 8)
     %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
@@ -49,7 +46,6 @@ define <8 x i8> @load_factor5(ptr %ptr) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg5e8.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v4_v5_v6_v7_v8
 ; CHECK-NEXT:    ret
     %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg5.load.v8i8.p0.i64(ptr %ptr, i64 8)
     %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
@@ -65,7 +61,6 @@ define <8 x i8> @load_factor6(ptr %ptr) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg6e8.v v3, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v3_v4_v5_v6_v7_v8
 ; CHECK-NEXT:    ret
     %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg6.load.v8i8.p0.i64(ptr %ptr, i64 8)
     %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
@@ -82,7 +77,6 @@ define <8 x i8> @load_factor7(ptr %ptr) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg7e8.v v2, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v2_v3_v4_v5_v6_v7_v8
 ; CHECK-NEXT:    ret
     %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg7.load.v8i8.p0.i64(ptr %ptr, i64 8)
     %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0
@@ -100,7 +94,6 @@ define <8 x i8> @load_factor8(ptr %ptr) {
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg8e8.v v1, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v1_v2_v3_v4_v5_v6_v7_v8
 ; CHECK-NEXT:    ret
     %1 = call { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } @llvm.riscv.seg8.load.v8i8.p0.i64(ptr %ptr, i64 8)
     %2 = extractvalue { <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8>, <8 x i8> } %1, 0

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-store.ll
index 0cba042b3363e..0b4184ad7474f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-segN-store.ll
@@ -6,9 +6,7 @@ declare void @llvm.riscv.seg2.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, ptr, iXLen
 define void @store_factor2(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr) {
 ; CHECK-LABEL: store_factor2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v9 killed $v9 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0)
 ; CHECK-NEXT:    ret
     call void @llvm.riscv.seg2.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, ptr %ptr, iXLen 8)
@@ -19,10 +17,7 @@ declare void @llvm.riscv.seg3.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, <8 x i8>,
 define void @store_factor3(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr) {
 ; CHECK-LABEL: store_factor3:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v10 killed $v10 killed $v8_v9_v10 def $v8_v9_v10
-; CHECK-NEXT:    # kill: def $v9 killed $v9 killed $v8_v9_v10 def $v8_v9_v10
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9_v10 def $v8_v9_v10
 ; CHECK-NEXT:    vsseg3e8.v v8, (a0)
 ; CHECK-NEXT:    ret
     call void @llvm.riscv.seg3.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, ptr %ptr, iXLen 8)
@@ -33,11 +28,7 @@ declare void @llvm.riscv.seg4.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, <8 x i8>,
 define void @store_factor4(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, ptr %ptr) {
 ; CHECK-LABEL: store_factor4:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v11 killed $v11 killed $v8_v9_v10_v11 def $v8_v9_v10_v11
-; CHECK-NEXT:    # kill: def $v10 killed $v10 killed $v8_v9_v10_v11 def $v8_v9_v10_v11
-; CHECK-NEXT:    # kill: def $v9 killed $v9 killed $v8_v9_v10_v11 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9_v10_v11 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vsseg4e8.v v8, (a0)
 ; CHECK-NEXT:    ret
     call void @llvm.riscv.seg4.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, ptr %ptr, iXLen 8)
@@ -48,12 +39,7 @@ declare void @llvm.riscv.seg5.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, <8 x i8>,
 define void @store_factor5(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, ptr %ptr) {
 ; CHECK-LABEL: store_factor5:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v12 killed $v12 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    # kill: def $v11 killed $v11 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    # kill: def $v10 killed $v10 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
-; CHECK-NEXT:    # kill: def $v9 killed $v9 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9_v10_v11_v12 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vsseg5e8.v v8, (a0)
 ; CHECK-NEXT:    ret
     call void @llvm.riscv.seg5.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, ptr %ptr, iXLen 8)
@@ -64,13 +50,7 @@ declare void @llvm.riscv.seg6.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, <8 x i8>,
 define void @store_factor6(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, ptr %ptr) {
 ; CHECK-LABEL: store_factor6:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v13 killed $v13 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    # kill: def $v12 killed $v12 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    # kill: def $v11 killed $v11 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    # kill: def $v10 killed $v10 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
-; CHECK-NEXT:    # kill: def $v9 killed $v9 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9_v10_v11_v12_v13 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vsseg6e8.v v8, (a0)
 ; CHECK-NEXT:    ret
     call void @llvm.riscv.seg6.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, ptr %ptr, iXLen 8)
@@ -81,14 +61,7 @@ declare void @llvm.riscv.seg7.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, <8 x i8>,
 define void @store_factor7(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, ptr %ptr) {
 ; CHECK-LABEL: store_factor7:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v14 killed $v14 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    # kill: def $v13 killed $v13 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    # kill: def $v12 killed $v12 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    # kill: def $v11 killed $v11 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    # kill: def $v10 killed $v10 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
-; CHECK-NEXT:    # kill: def $v9 killed $v9 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9_v10_v11_v12_v13_v14 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vsseg7e8.v v8, (a0)
 ; CHECK-NEXT:    ret
     call void @llvm.riscv.seg7.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, ptr %ptr, iXLen 8)
@@ -99,15 +72,7 @@ declare void @llvm.riscv.seg8.store.v8i8.p0.iXLen(<8 x i8>, <8 x i8>, <8 x i8>,
 define void @store_factor8(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, <8 x i8> %v7, ptr %ptr) {
 ; CHECK-LABEL: store_factor8:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v15 killed $v15 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    # kill: def $v14 killed $v14 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    # kill: def $v13 killed $v13 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    # kill: def $v12 killed $v12 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    # kill: def $v11 killed $v11 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    # kill: def $v10 killed $v10 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
-; CHECK-NEXT:    # kill: def $v9 killed $v9 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vsetivli zero, 8, e8, mf2, ta, ma
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9_v10_v11_v12_v13_v14_v15 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vsseg8e8.v v8, (a0)
 ; CHECK-NEXT:    ret
     call void @llvm.riscv.seg8.store.v8i8.p0.iXLen(<8 x i8> %v0, <8 x i8> %v1, <8 x i8> %v2, <8 x i8> %v3, <8 x i8> %v4, <8 x i8> %v5, <8 x i8> %v6, <8 x i8> %v7, ptr %ptr, iXLen 8)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll
index e5f83f8ad0aef..e697cb728b4b9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll
@@ -53,15 +53,7 @@ declare <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16>, <128 x i1>, i32)
 define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_v128i7_v128i16:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    vmv1r.v v28, v0
 ; CHECK-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v0, v0, 8
 ; CHECK-NEXT:    addi a1, a0, -64
@@ -70,24 +62,18 @@ define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zero
 ; CHECK-NEXT:    and a1, a2, a1
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    li a1, 64
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0, v0.t
+; CHECK-NEXT:    vnsrl.wi v24, v16, 0, v0.t
 ; CHECK-NEXT:    bltu a0, a1, .LBB4_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a0, 64
 ; CHECK-NEXT:  .LBB4_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vnsrl.wi v16, v24, 0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v28
+; CHECK-NEXT:    vnsrl.wi v16, v8, 0, v0.t
 ; CHECK-NEXT:    li a0, 128
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, tu, ma
-; CHECK-NEXT:    vslideup.vx v16, v8, a1
+; CHECK-NEXT:    vslideup.vx v16, v24, a1
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
   %v = call <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16> %a, <128 x i1> %m, i32 %vl)
   ret <128 x i7> %v
@@ -247,13 +233,13 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
 ; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb
 ; CHECK-NEXT:    vmv1r.v v1, v0
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    li a3, 24
-; CHECK-NEXT:    mul a2, a2, a3
+; CHECK-NEXT:    slli a2, a2, 4
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 16
 ; CHECK-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a2, a2, 5
+; CHECK-NEXT:    li a3, 24
+; CHECK-NEXT:    mul a2, a2, a3
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 16
 ; CHECK-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
@@ -290,7 +276,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a5, e32, m4, ta, ma
 ; CHECK-NEXT:    vnsrl.wi v16, v8, 0, v0.t
 ; CHECK-NEXT:    csrr a2, vlenb
-; CHECK-NEXT:    slli a2, a2, 4
+; CHECK-NEXT:    slli a2, a2, 3
 ; CHECK-NEXT:    add a2, sp, a2
 ; CHECK-NEXT:    addi a2, a2, 16
 ; CHECK-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
@@ -326,7 +312,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
 ; CHECK-NEXT:  .LBB16_4:
 ; CHECK-NEXT:    li a3, 32
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT:    vle64.v v16, (a1)
+; CHECK-NEXT:    vle64.v v24, (a1)
 ; CHECK-NEXT:    addi a5, a7, -32
 ; CHECK-NEXT:    sltu a6, a7, a5
 ; CHECK-NEXT:    addi a6, a6, -1
@@ -337,12 +323,9 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
 ; CHECK-NEXT:    and a6, t0, a6
 ; CHECK-NEXT:    vsetvli zero, a6, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v4
-; CHECK-NEXT:    vnsrl.wi v24, v8, 0, v0.t
-; CHECK-NEXT:    csrr a6, vlenb
-; CHECK-NEXT:    slli a6, a6, 3
-; CHECK-NEXT:    add a6, sp, a6
-; CHECK-NEXT:    addi a6, a6, 16
-; CHECK-NEXT:    vs8r.v v24, (a6) # Unknown-size Folded Spill
+; CHECK-NEXT:    vnsrl.wi v16, v8, 0, v0.t
+; CHECK-NEXT:    addi a6, sp, 16
+; CHECK-NEXT:    vs8r.v v16, (a6) # Unknown-size Folded Spill
 ; CHECK-NEXT:    bltu a5, a2, .LBB16_6
 ; CHECK-NEXT:  # %bb.5:
 ; CHECK-NEXT:    li a5, 16
@@ -351,7 +334,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
 ; CHECK-NEXT:    addi a1, a1, 256
 ; CHECK-NEXT:    vsetvli zero, a5, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0, v0.t
+; CHECK-NEXT:    vnsrl.wi v8, v24, 0, v0.t
 ; CHECK-NEXT:    csrr a5, vlenb
 ; CHECK-NEXT:    li t0, 40
 ; CHECK-NEXT:    mul a5, a5, t0
@@ -363,10 +346,10 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
 ; CHECK-NEXT:    li a4, 32
 ; CHECK-NEXT:  .LBB16_8:
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vi v4, v3, 2
+; CHECK-NEXT:    vslidedown.vi v20, v3, 2
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
-; CHECK-NEXT:    vle64.v v16, (a6)
-; CHECK-NEXT:    vle64.v v24, (a1)
+; CHECK-NEXT:    vle64.v v24, (a6)
+; CHECK-NEXT:    vle64.v v8, (a1)
 ; CHECK-NEXT:    mv a1, a4
 ; CHECK-NEXT:    bltu a4, a2, .LBB16_10
 ; CHECK-NEXT:  # %bb.9:
@@ -376,16 +359,19 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
 ; CHECK-NEXT:    vslidedown.vi v2, v1, 2
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v3
-; CHECK-NEXT:    vnsrl.wi v8, v24, 0, v0.t
+; CHECK-NEXT:    vnsrl.wi v16, v8, 0, v0.t
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 5
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT:    addi a1, a4, -16
 ; CHECK-NEXT:    sltu a4, a4, a1
 ; CHECK-NEXT:    addi a4, a4, -1
 ; CHECK-NEXT:    and a1, a4, a1
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v4
-; CHECK-NEXT:    vnsrl.wi v24, v16, 0, v0.t
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    vmv1r.v v0, v20
+; CHECK-NEXT:    vnsrl.wi v8, v24, 0, v0.t
 ; CHECK-NEXT:    bltu a7, a3, .LBB16_12
 ; CHECK-NEXT:  # %bb.11:
 ; CHECK-NEXT:    li a7, 32
@@ -398,7 +384,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
 ; CHECK-NEXT:    addi a1, a1, 16
 ; CHECK-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    slli a1, a1, 3
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 16
 ; CHECK-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
@@ -415,10 +401,7 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 16
 ; CHECK-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    add a1, sp, a1
-; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    addi a1, sp, 16
 ; CHECK-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vslideup.vi v16, v24, 16
 ; CHECK-NEXT:    csrr a1, vlenb
@@ -427,9 +410,17 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 16
 ; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
-; CHECK-NEXT:    addi a1, sp, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 5
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
 ; CHECK-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT:    vslideup.vi v8, v16, 16
+; CHECK-NEXT:    vslideup.vi v16, v8, 16
+; CHECK-NEXT:    csrr a1, vlenb
+; CHECK-NEXT:    slli a1, a1, 5
+; CHECK-NEXT:    add a1, sp, a1
+; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT:    addi a1, a7, -16
 ; CHECK-NEXT:    sltu a4, a7, a1
 ; CHECK-NEXT:    addi a4, a4, -1
@@ -437,17 +428,11 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v2
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    li a4, 24
-; CHECK-NEXT:    mul a1, a1, a4
-; CHECK-NEXT:    add a1, sp, a1
-; CHECK-NEXT:    addi a1, a1, 16
-; CHECK-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT:    vnsrl.wi v24, v16, 0, v0.t
-; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    slli a1, a1, 4
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 16
-; CHECK-NEXT:    vs8r.v v24, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vnsrl.wi v8, v16, 0, v0.t
 ; CHECK-NEXT:    bltu a7, a2, .LBB16_14
 ; CHECK-NEXT:  # %bb.13:
 ; CHECK-NEXT:    li a7, 16
@@ -455,20 +440,21 @@ define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 ze
 ; CHECK-NEXT:    vsetvli zero, a7, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 5
+; CHECK-NEXT:    li a2, 24
+; CHECK-NEXT:    mul a1, a1, a2
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 16
-; CHECK-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT:    vnsrl.wi v24, v16, 0, v0.t
+; CHECK-NEXT:    vl8r.v v24, (a1) # Unknown-size Folded Reload
+; CHECK-NEXT:    vnsrl.wi v16, v24, 0, v0.t
 ; CHECK-NEXT:    vsetvli zero, a3, e32, m8, tu, ma
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 4
-; CHECK-NEXT:    add a1, sp, a1
-; CHECK-NEXT:    addi a1, a1, 16
-; CHECK-NEXT:    vl8r.v v16, (a1) # Unknown-size Folded Reload
-; CHECK-NEXT:    vslideup.vi v24, v16, 16
-; CHECK-NEXT:    vse32.v v24, (a0)
+; CHECK-NEXT:    vslideup.vi v16, v8, 16
+; CHECK-NEXT:    vse32.v v16, (a0)
 ; CHECK-NEXT:    addi a1, a0, 256
+; CHECK-NEXT:    csrr a2, vlenb
+; CHECK-NEXT:    slli a2, a2, 5
+; CHECK-NEXT:    add a2, sp, a2
+; CHECK-NEXT:    addi a2, a2, 16
+; CHECK-NEXT:    vl8r.v v8, (a2) # Unknown-size Folded Reload
 ; CHECK-NEXT:    vse32.v v8, (a1)
 ; CHECK-NEXT:    addi a1, a0, 128
 ; CHECK-NEXT:    csrr a2, vlenb
@@ -501,15 +487,7 @@ declare <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64>, <32 x i1>, i32)
 define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_v32i32_v32i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
-; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
+; CHECK-NEXT:    vmv1r.v v28, v0
 ; CHECK-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v0, v0, 2
 ; CHECK-NEXT:    addi a1, a0, -16
@@ -518,24 +496,18 @@ define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext
 ; CHECK-NEXT:    and a1, a2, a1
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    li a1, 16
-; CHECK-NEXT:    vnsrl.wi v8, v16, 0, v0.t
+; CHECK-NEXT:    vnsrl.wi v24, v16, 0, v0.t
 ; CHECK-NEXT:    bltu a0, a1, .LBB17_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    li a0, 16
 ; CHECK-NEXT:  .LBB17_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vnsrl.wi v16, v24, 0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v28
+; CHECK-NEXT:    vnsrl.wi v16, v8, 0, v0.t
 ; CHECK-NEXT:    li a0, 32
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, ma
-; CHECK-NEXT:    vslideup.vi v16, v8, 16
+; CHECK-NEXT:    vslideup.vi v16, v24, 16
 ; CHECK-NEXT:    vmv8r.v v8, v16
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret
   %v = call <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64> %a, <32 x i1> %m, i32 %vl)
   ret <32 x i32> %v

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
index e97a710ba4129..54d267b282d1b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-fp-interleave.ll
@@ -39,14 +39,13 @@ define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) {
 ; RV32-V128-LABEL: interleave_v2f64:
 ; RV32-V128:       # %bb.0:
 ; RV32-V128-NEXT:    vmv1r.v v12, v9
-; RV32-V128-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; RV32-V128-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-V128-NEXT:    vid.v v10
-; RV32-V128-NEXT:    vsrl.vi v14, v10, 1
+; RV32-V128-NEXT:    vid.v v9
+; RV32-V128-NEXT:    vsrl.vi v14, v9, 1
 ; RV32-V128-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
+; RV32-V128-NEXT:    vrgatherei16.vv v10, v8, v14
 ; RV32-V128-NEXT:    li a0, 10
 ; RV32-V128-NEXT:    vmv.s.x v0, a0
-; RV32-V128-NEXT:    vrgatherei16.vv v10, v8, v14
 ; RV32-V128-NEXT:    vrgatherei16.vv v10, v12, v14, v0.t
 ; RV32-V128-NEXT:    vmv.v.v v8, v10
 ; RV32-V128-NEXT:    ret
@@ -54,13 +53,12 @@ define <4 x double> @interleave_v2f64(<2 x double> %x, <2 x double> %y) {
 ; RV64-V128-LABEL: interleave_v2f64:
 ; RV64-V128:       # %bb.0:
 ; RV64-V128-NEXT:    vmv1r.v v12, v9
-; RV64-V128-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; RV64-V128-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-V128-NEXT:    vid.v v10
 ; RV64-V128-NEXT:    vsrl.vi v14, v10, 1
+; RV64-V128-NEXT:    vrgather.vv v10, v8, v14
 ; RV64-V128-NEXT:    li a0, 10
 ; RV64-V128-NEXT:    vmv.s.x v0, a0
-; RV64-V128-NEXT:    vrgather.vv v10, v8, v14
 ; RV64-V128-NEXT:    vrgather.vv v10, v12, v14, v0.t
 ; RV64-V128-NEXT:    vmv.v.v v8, v10
 ; RV64-V128-NEXT:    ret
@@ -266,9 +264,9 @@ define <64 x float> @interleave_v32f32(<32 x float> %x, <32 x float> %y) {
 ; RV32-V128-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-V128-NEXT:    vle32.v v0, (a0)
 ; RV32-V128-NEXT:    vmv8r.v v24, v8
-; RV32-V128-NEXT:    addi a0, sp, 16
-; RV32-V128-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; RV32-V128-NEXT:    vrgather.vv v8, v24, v0
+; RV32-V128-NEXT:    addi a0, sp, 16
+; RV32-V128-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
 ; RV32-V128-NEXT:    lui a0, %hi(.LCPI10_1)
 ; RV32-V128-NEXT:    addi a0, a0, %lo(.LCPI10_1)
 ; RV32-V128-NEXT:    vle32.v v24, (a0)
@@ -317,9 +315,9 @@ define <64 x float> @interleave_v32f32(<32 x float> %x, <32 x float> %y) {
 ; RV64-V128-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV64-V128-NEXT:    vle32.v v0, (a0)
 ; RV64-V128-NEXT:    vmv8r.v v24, v8
-; RV64-V128-NEXT:    addi a0, sp, 16
-; RV64-V128-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; RV64-V128-NEXT:    vrgather.vv v8, v24, v0
+; RV64-V128-NEXT:    addi a0, sp, 16
+; RV64-V128-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
 ; RV64-V128-NEXT:    lui a0, %hi(.LCPI10_1)
 ; RV64-V128-NEXT:    addi a0, a0, %lo(.LCPI10_1)
 ; RV64-V128-NEXT:    vle32.v v24, (a0)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
index 3d1b849b7a388..7a662952caf4c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-insert-subvector.ll
@@ -59,14 +59,14 @@ define <vscale x 8 x i32> @insert_nxv8i32_v8i32_0(<vscale x 8 x i32> %vec, ptr %
 ;
 ; LMULMAX1-LABEL: insert_nxv8i32_v8i32_0:
 ; LMULMAX1:       # %bb.0:
+; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT:    vle32.v v12, (a0)
-; LMULMAX1-NEXT:    addi a0, a0, 16
+; LMULMAX1-NEXT:    vle32.v v12, (a1)
 ; LMULMAX1-NEXT:    vle32.v v16, (a0)
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m4, tu, ma
-; LMULMAX1-NEXT:    vslideup.vi v8, v12, 0
+; LMULMAX1-NEXT:    vslideup.vi v8, v16, 0
 ; LMULMAX1-NEXT:    vsetivli zero, 8, e32, m4, tu, ma
-; LMULMAX1-NEXT:    vslideup.vi v8, v16, 4
+; LMULMAX1-NEXT:    vslideup.vi v8, v12, 4
 ; LMULMAX1-NEXT:    ret
   %sv = load <8 x i32>, ptr %svp
   %v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 0)
@@ -84,14 +84,14 @@ define <vscale x 8 x i32> @insert_nxv8i32_v8i32_8(<vscale x 8 x i32> %vec, ptr %
 ;
 ; LMULMAX1-LABEL: insert_nxv8i32_v8i32_8:
 ; LMULMAX1:       # %bb.0:
+; LMULMAX1-NEXT:    addi a1, a0, 16
 ; LMULMAX1-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
-; LMULMAX1-NEXT:    vle32.v v12, (a0)
-; LMULMAX1-NEXT:    addi a0, a0, 16
+; LMULMAX1-NEXT:    vle32.v v12, (a1)
 ; LMULMAX1-NEXT:    vle32.v v16, (a0)
 ; LMULMAX1-NEXT:    vsetivli zero, 12, e32, m4, tu, ma
-; LMULMAX1-NEXT:    vslideup.vi v8, v12, 8
+; LMULMAX1-NEXT:    vslideup.vi v8, v16, 8
 ; LMULMAX1-NEXT:    vsetivli zero, 16, e32, m4, tu, ma
-; LMULMAX1-NEXT:    vslideup.vi v8, v16, 12
+; LMULMAX1-NEXT:    vslideup.vi v8, v12, 12
 ; LMULMAX1-NEXT:    ret
   %sv = load <8 x i32>, ptr %svp
   %v = call <vscale x 8 x i32> @llvm.vector.insert.v8i32.nxv8i32(<vscale x 8 x i32> %vec, <8 x i32> %sv, i64 8)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
index 1208510ad8e2d..e007f84d01c93 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-exttrunc.ll
@@ -92,16 +92,16 @@ define void @sext_v32i8_v32i32(ptr %x, ptr %z) {
 ; LMULMAX2-NEXT:    vsetivli zero, 16, e8, m2, ta, ma
 ; LMULMAX2-NEXT:    vslidedown.vi v10, v8, 16
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
-; LMULMAX2-NEXT:    vslidedown.vi v14, v10, 8
+; LMULMAX2-NEXT:    vslidedown.vi v9, v10, 8
 ; LMULMAX2-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
-; LMULMAX2-NEXT:    vsext.vf4 v16, v14
-; LMULMAX2-NEXT:    vsext.vf4 v14, v8
+; LMULMAX2-NEXT:    vsext.vf4 v14, v9
+; LMULMAX2-NEXT:    vsext.vf4 v16, v8
 ; LMULMAX2-NEXT:    vsext.vf4 v8, v10
 ; LMULMAX2-NEXT:    addi a0, a1, 64
 ; LMULMAX2-NEXT:    vse32.v v8, (a0)
-; LMULMAX2-NEXT:    vse32.v v14, (a1)
+; LMULMAX2-NEXT:    vse32.v v16, (a1)
 ; LMULMAX2-NEXT:    addi a0, a1, 96
-; LMULMAX2-NEXT:    vse32.v v16, (a0)
+; LMULMAX2-NEXT:    vse32.v v14, (a0)
 ; LMULMAX2-NEXT:    addi a0, a1, 32
 ; LMULMAX2-NEXT:    vse32.v v12, (a0)
 ; LMULMAX2-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
index 2e97c5114483b..57312808ca9a2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-interleave.ll
@@ -52,14 +52,13 @@ define <4 x i64> @interleave_v2i64(<2 x i64> %x, <2 x i64> %y) {
 ; RV32-V128-LABEL: interleave_v2i64:
 ; RV32-V128:       # %bb.0:
 ; RV32-V128-NEXT:    vmv1r.v v12, v9
-; RV32-V128-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; RV32-V128-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
-; RV32-V128-NEXT:    vid.v v10
-; RV32-V128-NEXT:    vsrl.vi v14, v10, 1
+; RV32-V128-NEXT:    vid.v v9
+; RV32-V128-NEXT:    vsrl.vi v14, v9, 1
 ; RV32-V128-NEXT:    vsetvli zero, zero, e64, m2, ta, mu
+; RV32-V128-NEXT:    vrgatherei16.vv v10, v8, v14
 ; RV32-V128-NEXT:    li a0, 10
 ; RV32-V128-NEXT:    vmv.s.x v0, a0
-; RV32-V128-NEXT:    vrgatherei16.vv v10, v8, v14
 ; RV32-V128-NEXT:    vrgatherei16.vv v10, v12, v14, v0.t
 ; RV32-V128-NEXT:    vmv.v.v v8, v10
 ; RV32-V128-NEXT:    ret
@@ -67,13 +66,12 @@ define <4 x i64> @interleave_v2i64(<2 x i64> %x, <2 x i64> %y) {
 ; RV64-V128-LABEL: interleave_v2i64:
 ; RV64-V128:       # %bb.0:
 ; RV64-V128-NEXT:    vmv1r.v v12, v9
-; RV64-V128-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; RV64-V128-NEXT:    vsetivli zero, 4, e64, m2, ta, mu
 ; RV64-V128-NEXT:    vid.v v10
 ; RV64-V128-NEXT:    vsrl.vi v14, v10, 1
+; RV64-V128-NEXT:    vrgather.vv v10, v8, v14
 ; RV64-V128-NEXT:    li a0, 10
 ; RV64-V128-NEXT:    vmv.s.x v0, a0
-; RV64-V128-NEXT:    vrgather.vv v10, v8, v14
 ; RV64-V128-NEXT:    vrgather.vv v10, v12, v14, v0.t
 ; RV64-V128-NEXT:    vmv.v.v v8, v10
 ; RV64-V128-NEXT:    ret
@@ -370,9 +368,9 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
 ; RV32-V128-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV32-V128-NEXT:    vle32.v v0, (a0)
 ; RV32-V128-NEXT:    vmv8r.v v24, v8
-; RV32-V128-NEXT:    addi a0, sp, 16
-; RV32-V128-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; RV32-V128-NEXT:    vrgather.vv v8, v24, v0
+; RV32-V128-NEXT:    addi a0, sp, 16
+; RV32-V128-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
 ; RV32-V128-NEXT:    lui a0, %hi(.LCPI15_1)
 ; RV32-V128-NEXT:    addi a0, a0, %lo(.LCPI15_1)
 ; RV32-V128-NEXT:    vle32.v v24, (a0)
@@ -421,9 +419,9 @@ define <64 x i32> @interleave_v32i32(<32 x i32> %x, <32 x i32> %y) {
 ; RV64-V128-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
 ; RV64-V128-NEXT:    vle32.v v0, (a0)
 ; RV64-V128-NEXT:    vmv8r.v v24, v8
-; RV64-V128-NEXT:    addi a0, sp, 16
-; RV64-V128-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; RV64-V128-NEXT:    vrgather.vv v8, v24, v0
+; RV64-V128-NEXT:    addi a0, sp, 16
+; RV64-V128-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
 ; RV64-V128-NEXT:    lui a0, %hi(.LCPI15_1)
 ; RV64-V128-NEXT:    addi a0, a0, %lo(.LCPI15_1)
 ; RV64-V128-NEXT:    vle32.v v24, (a0)

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
index 9ce01467c83ba..7b28ce7ad654c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-int-shuffles.ll
@@ -635,7 +635,6 @@ define <4 x i16> @shuffle_shuffle_vslidedown(<16 x i16> %0) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v8, v8, 5
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8m2
 ; CHECK-NEXT:    ret
 entry:
   %1 = shufflevector <16 x i16> %0, <16 x i16> poison, <8 x i32> <i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7>

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
index be781a983460c..b531df25cad7b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -12719,8 +12719,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB98_23
 ; RV64ZVE32F-NEXT:  # %bb.22: # %cond.load37
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vslidedown.vi v13, v12, 1
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v13
+; RV64ZVE32F-NEXT:    vslidedown.vi v9, v12, 1
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lb a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
@@ -12731,7 +12731,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:  .LBB98_23: # %else38
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    slli a2, a1, 49
-; RV64ZVE32F-NEXT:    vslidedown.vi v12, v12, 2
+; RV64ZVE32F-NEXT:    vslidedown.vi v9, v12, 2
 ; RV64ZVE32F-NEXT:    bltz a2, .LBB98_55
 ; RV64ZVE32F-NEXT:  # %bb.24: # %else41
 ; RV64ZVE32F-NEXT:    slli a2, a1, 48
@@ -12744,8 +12744,8 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB98_28
 ; RV64ZVE32F-NEXT:  .LBB98_27: # %cond.load49
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vslidedown.vi v12, v8, 1
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
+; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 1
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lb a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
@@ -12756,10 +12756,10 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:  .LBB98_28: # %else50
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    slli a2, a1, 45
-; RV64ZVE32F-NEXT:    vslidedown.vi v13, v8, 2
+; RV64ZVE32F-NEXT:    vslidedown.vi v12, v8, 2
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB98_30
 ; RV64ZVE32F-NEXT:  # %bb.29: # %cond.load52
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v13
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lb a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
@@ -12770,7 +12770,7 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:  .LBB98_30: # %else53
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    slli a2, a1, 44
-; RV64ZVE32F-NEXT:    vslidedown.vi v12, v8, 4
+; RV64ZVE32F-NEXT:    vslidedown.vi v9, v8, 4
 ; RV64ZVE32F-NEXT:    bltz a2, .LBB98_58
 ; RV64ZVE32F-NEXT:  # %bb.31: # %else56
 ; RV64ZVE32F-NEXT:    slli a2, a1, 43
@@ -12780,21 +12780,21 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB98_34
 ; RV64ZVE32F-NEXT:  .LBB98_33: # %cond.load61
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vslidedown.vi v13, v12, 1
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v13
+; RV64ZVE32F-NEXT:    vslidedown.vi v12, v9, 1
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lb a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
 ; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
+; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 22, e8, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 21
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 21
 ; RV64ZVE32F-NEXT:  .LBB98_34: # %else62
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v8, v8, 8
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    slli a2, a1, 41
-; RV64ZVE32F-NEXT:    vslidedown.vi v9, v12, 2
+; RV64ZVE32F-NEXT:    vslidedown.vi v9, v9, 2
 ; RV64ZVE32F-NEXT:    bltz a2, .LBB98_60
 ; RV64ZVE32F-NEXT:  # %bb.35: # %else65
 ; RV64ZVE32F-NEXT:    slli a2, a1, 40
@@ -12948,20 +12948,20 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    bnez a2, .LBB98_14
 ; RV64ZVE32F-NEXT:    j .LBB98_15
 ; RV64ZVE32F-NEXT:  .LBB98_55: # %cond.load40
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lb a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
 ; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
+; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 15, e8, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 14
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 14
 ; RV64ZVE32F-NEXT:    slli a2, a1, 48
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB98_25
 ; RV64ZVE32F-NEXT:  .LBB98_56: # %cond.load43
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vslidedown.vi v12, v12, 1
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
+; RV64ZVE32F-NEXT:    vslidedown.vi v9, v9, 1
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lb a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
@@ -12986,27 +12986,27 @@ define <32 x i8> @mgather_baseidx_v32i8(ptr %base, <32 x i8> %idxs, <32 x i1> %m
 ; RV64ZVE32F-NEXT:    j .LBB98_28
 ; RV64ZVE32F-NEXT:  .LBB98_58: # %cond.load55
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vslidedown.vi v13, v13, 1
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v13
+; RV64ZVE32F-NEXT:    vslidedown.vi v12, v12, 1
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lb a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
 ; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
+; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 20, e8, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 19
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 19
 ; RV64ZVE32F-NEXT:    slli a2, a1, 43
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB98_32
 ; RV64ZVE32F-NEXT:  .LBB98_59: # %cond.load58
 ; RV64ZVE32F-NEXT:    vsetivli zero, 0, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v9
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    lb a2, 0(a2)
 ; RV64ZVE32F-NEXT:    li a3, 32
 ; RV64ZVE32F-NEXT:    vsetvli zero, a3, e8, m1, ta, ma
-; RV64ZVE32F-NEXT:    vmv.s.x v14, a2
+; RV64ZVE32F-NEXT:    vmv.s.x v12, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 21, e8, m2, tu, ma
-; RV64ZVE32F-NEXT:    vslideup.vi v10, v14, 20
+; RV64ZVE32F-NEXT:    vslideup.vi v10, v12, 20
 ; RV64ZVE32F-NEXT:    slli a2, a1, 42
 ; RV64ZVE32F-NEXT:    bltz a2, .LBB98_33
 ; RV64ZVE32F-NEXT:    j .LBB98_34

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
index aecbd1fbc5f15..65fe75047526d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -11089,8 +11089,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB92_23
 ; RV64ZVE32F-NEXT:  # %bb.22: # %cond.store25
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vslidedown.vi v13, v12, 1
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v13
+; RV64ZVE32F-NEXT:    vslidedown.vi v11, v12, 1
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v11
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v14, v8, 13
@@ -11098,7 +11098,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
 ; RV64ZVE32F-NEXT:  .LBB92_23: # %else26
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    slli a2, a1, 49
-; RV64ZVE32F-NEXT:    vslidedown.vi v12, v12, 2
+; RV64ZVE32F-NEXT:    vslidedown.vi v11, v12, 2
 ; RV64ZVE32F-NEXT:    bltz a2, .LBB92_55
 ; RV64ZVE32F-NEXT:  # %bb.24: # %else28
 ; RV64ZVE32F-NEXT:    slli a2, a1, 48
@@ -11111,8 +11111,8 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB92_28
 ; RV64ZVE32F-NEXT:  .LBB92_27: # %cond.store33
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vslidedown.vi v12, v10, 1
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
+; RV64ZVE32F-NEXT:    vslidedown.vi v11, v10, 1
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v11
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v12, v8, 17
@@ -11120,10 +11120,10 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
 ; RV64ZVE32F-NEXT:  .LBB92_28: # %else34
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    slli a2, a1, 45
-; RV64ZVE32F-NEXT:    vslidedown.vi v13, v10, 2
+; RV64ZVE32F-NEXT:    vslidedown.vi v12, v10, 2
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB92_30
 ; RV64ZVE32F-NEXT:  # %bb.29: # %cond.store35
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v13
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v14, v8, 18
@@ -11131,7 +11131,7 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
 ; RV64ZVE32F-NEXT:  .LBB92_30: # %else36
 ; RV64ZVE32F-NEXT:    vsetivli zero, 4, e8, mf2, ta, ma
 ; RV64ZVE32F-NEXT:    slli a2, a1, 44
-; RV64ZVE32F-NEXT:    vslidedown.vi v12, v10, 4
+; RV64ZVE32F-NEXT:    vslidedown.vi v11, v10, 4
 ; RV64ZVE32F-NEXT:    bltz a2, .LBB92_58
 ; RV64ZVE32F-NEXT:  # %bb.31: # %else38
 ; RV64ZVE32F-NEXT:    slli a2, a1, 43
@@ -11141,18 +11141,18 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB92_34
 ; RV64ZVE32F-NEXT:  .LBB92_33: # %cond.store41
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vslidedown.vi v13, v12, 1
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v13
+; RV64ZVE32F-NEXT:    vslidedown.vi v12, v11, 1
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT:    vslidedown.vi v14, v8, 21
-; RV64ZVE32F-NEXT:    vse8.v v14, (a2)
+; RV64ZVE32F-NEXT:    vslidedown.vi v12, v8, 21
+; RV64ZVE32F-NEXT:    vse8.v v12, (a2)
 ; RV64ZVE32F-NEXT:  .LBB92_34: # %else42
 ; RV64ZVE32F-NEXT:    vsetivli zero, 8, e8, m1, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v10, v10, 8
 ; RV64ZVE32F-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
 ; RV64ZVE32F-NEXT:    slli a2, a1, 41
-; RV64ZVE32F-NEXT:    vslidedown.vi v11, v12, 2
+; RV64ZVE32F-NEXT:    vslidedown.vi v11, v11, 2
 ; RV64ZVE32F-NEXT:    bltz a2, .LBB92_60
 ; RV64ZVE32F-NEXT:  # %bb.35: # %else44
 ; RV64ZVE32F-NEXT:    slli a2, a1, 40
@@ -11275,17 +11275,17 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
 ; RV64ZVE32F-NEXT:    bnez a2, .LBB92_14
 ; RV64ZVE32F-NEXT:    j .LBB92_15
 ; RV64ZVE32F-NEXT:  .LBB92_55: # %cond.store27
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v11
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT:    vslidedown.vi v14, v8, 14
-; RV64ZVE32F-NEXT:    vse8.v v14, (a2)
+; RV64ZVE32F-NEXT:    vslidedown.vi v12, v8, 14
+; RV64ZVE32F-NEXT:    vse8.v v12, (a2)
 ; RV64ZVE32F-NEXT:    slli a2, a1, 48
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB92_25
 ; RV64ZVE32F-NEXT:  .LBB92_56: # %cond.store29
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vslidedown.vi v12, v12, 1
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
+; RV64ZVE32F-NEXT:    vslidedown.vi v11, v11, 1
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v11
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
 ; RV64ZVE32F-NEXT:    vslidedown.vi v12, v8, 15
@@ -11304,21 +11304,21 @@ define void @mscatter_baseidx_v32i8(<32 x i8> %val, ptr %base, <32 x i8> %idxs,
 ; RV64ZVE32F-NEXT:    j .LBB92_28
 ; RV64ZVE32F-NEXT:  .LBB92_58: # %cond.store37
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vslidedown.vi v13, v13, 1
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v13
+; RV64ZVE32F-NEXT:    vslidedown.vi v12, v12, 1
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT:    vslidedown.vi v14, v8, 19
-; RV64ZVE32F-NEXT:    vse8.v v14, (a2)
+; RV64ZVE32F-NEXT:    vslidedown.vi v12, v8, 19
+; RV64ZVE32F-NEXT:    vse8.v v12, (a2)
 ; RV64ZVE32F-NEXT:    slli a2, a1, 43
 ; RV64ZVE32F-NEXT:    bgez a2, .LBB92_32
 ; RV64ZVE32F-NEXT:  .LBB92_59: # %cond.store39
 ; RV64ZVE32F-NEXT:    vsetivli zero, 0, e8, mf4, ta, ma
-; RV64ZVE32F-NEXT:    vmv.x.s a2, v12
+; RV64ZVE32F-NEXT:    vmv.x.s a2, v11
 ; RV64ZVE32F-NEXT:    add a2, a0, a2
 ; RV64ZVE32F-NEXT:    vsetivli zero, 1, e8, m2, ta, ma
-; RV64ZVE32F-NEXT:    vslidedown.vi v14, v8, 20
-; RV64ZVE32F-NEXT:    vse8.v v14, (a2)
+; RV64ZVE32F-NEXT:    vslidedown.vi v12, v8, 20
+; RV64ZVE32F-NEXT:    vse8.v v12, (a2)
 ; RV64ZVE32F-NEXT:    slli a2, a1, 42
 ; RV64ZVE32F-NEXT:    bltz a2, .LBB92_33
 ; RV64ZVE32F-NEXT:    j .LBB92_34

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
index 8bad561fadfba..3feb976fc0e18 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-fp.ll
@@ -693,11 +693,11 @@ define float @vreduce_fwadd_v64f32(ptr %x, float %s) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v16, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v24, fa0
+; CHECK-NEXT:    vfmv.s.f v12, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vfwadd.vv v0, v8, v16
+; CHECK-NEXT:    vfwadd.vv v24, v8, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vfredusum.vs v8, v0, v24
+; CHECK-NEXT:    vfredusum.vs v8, v24, v12
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
 ; CHECK-NEXT:    ret
   %v = load <64 x half>, ptr %x
@@ -716,9 +716,9 @@ define float @vreduce_ord_fwadd_v64f32(ptr %x, float %s) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v16, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v24, fa0
+; CHECK-NEXT:    vfmv.s.f v12, fa0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vfwredosum.vs v8, v8, v24
+; CHECK-NEXT:    vfwredosum.vs v8, v8, v12
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v8
 ; CHECK-NEXT:    vsetivli zero, 0, e32, m1, ta, ma
 ; CHECK-NEXT:    vfmv.f.s fa0, v8
@@ -1119,9 +1119,9 @@ define double @vreduce_ord_fwadd_v32f64(ptr %x, double %s) {
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vi v16, v8, 16
 ; CHECK-NEXT:    vsetivli zero, 16, e64, m1, ta, ma
-; CHECK-NEXT:    vfmv.s.f v24, fa0
+; CHECK-NEXT:    vfmv.s.f v12, fa0
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; CHECK-NEXT:    vfwredosum.vs v8, v8, v24
+; CHECK-NEXT:    vfwredosum.vs v8, v8, v12
 ; CHECK-NEXT:    vfwredosum.vs v8, v16, v8
 ; CHECK-NEXT:    vsetivli zero, 0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfmv.f.s fa0, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
index 45e91c50447b2..22b7841e9924c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-int.ll
@@ -535,11 +535,11 @@ define i16 @vwreduce_add_v128i16(ptr %x) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v16, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v24, zero
+; CHECK-NEXT:    vmv.s.x v12, zero
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwadd.vv v0, v8, v16
+; CHECK-NEXT:    vwadd.vv v24, v8, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vredsum.vs v8, v0, v24
+; CHECK-NEXT:    vredsum.vs v8, v24, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %v = load <128 x i8>, ptr %x
@@ -558,11 +558,11 @@ define i16 @vwreduce_uadd_v128i16(ptr %x) {
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v16, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v24, zero
+; CHECK-NEXT:    vmv.s.x v12, zero
 ; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT:    vwaddu.vv v0, v8, v16
+; CHECK-NEXT:    vwaddu.vv v24, v8, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e16, m8, ta, ma
-; CHECK-NEXT:    vredsum.vs v8, v0, v24
+; CHECK-NEXT:    vredsum.vs v8, v24, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %v = load <128 x i8>, ptr %x
@@ -902,11 +902,11 @@ define i32 @vwreduce_add_v64i32(ptr %x) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v16, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v24, zero
+; CHECK-NEXT:    vmv.s.x v12, zero
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwadd.vv v0, v8, v16
+; CHECK-NEXT:    vwadd.vv v24, v8, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vredsum.vs v8, v0, v24
+; CHECK-NEXT:    vredsum.vs v8, v24, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %v = load <64 x i16>, ptr %x
@@ -925,11 +925,11 @@ define i32 @vwreduce_uadd_v64i32(ptr %x) {
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, ma
 ; CHECK-NEXT:    vslidedown.vx v16, v8, a0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, ma
-; CHECK-NEXT:    vmv.s.x v24, zero
+; CHECK-NEXT:    vmv.s.x v12, zero
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
-; CHECK-NEXT:    vwaddu.vv v0, v8, v16
+; CHECK-NEXT:    vwaddu.vv v24, v8, v16
 ; CHECK-NEXT:    vsetvli zero, zero, e32, m8, ta, ma
-; CHECK-NEXT:    vredsum.vs v8, v0, v24
+; CHECK-NEXT:    vredsum.vs v8, v24, v12
 ; CHECK-NEXT:    vmv.x.s a0, v8
 ; CHECK-NEXT:    ret
   %v = load <64 x i16>, ptr %x
@@ -1556,45 +1556,24 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a1, a1, 5
+; RV32-NEXT:    slli a1, a1, 4
 ; RV32-NEXT:    sub sp, sp, a1
-; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
 ; RV32-NEXT:    addi a1, a0, 128
 ; RV32-NEXT:    li a2, 32
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; RV32-NEXT:    vle32.v v16, (a1)
 ; RV32-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
 ; RV32-NEXT:    vslidedown.vi v24, v8, 16
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    li a1, 24
-; RV32-NEXT:    mul a0, a0, a1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
-; RV32-NEXT:    vslidedown.vi v24, v16, 16
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 3
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT:    vslidedown.vi v0, v16, 16
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    li a1, 24
-; RV32-NEXT:    mul a0, a0, a1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 3
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vmv4r.v v8, v0
 ; RV32-NEXT:    vwadd.vv v0, v24, v8
 ; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add a0, sp, a0
 ; RV32-NEXT:    addi a0, a0, 16
 ; RV32-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
@@ -1603,7 +1582,7 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
 ; RV32-NEXT:    vwadd.vv v0, v8, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add a0, sp, a0
 ; RV32-NEXT:    addi a0, a0, 16
 ; RV32-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -1615,7 +1594,7 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
 ; RV32-NEXT:    vsrl.vx v8, v8, a2
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    csrr a2, vlenb
-; RV32-NEXT:    slli a2, a2, 5
+; RV32-NEXT:    slli a2, a2, 4
 ; RV32-NEXT:    add sp, sp, a2
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -1625,45 +1604,24 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 5
+; RV64-NEXT:    slli a1, a1, 4
 ; RV64-NEXT:    sub sp, sp, a1
-; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
 ; RV64-NEXT:    addi a1, a0, 128
 ; RV64-NEXT:    li a2, 32
 ; RV64-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; RV64-NEXT:    vle32.v v8, (a0)
-; RV64-NEXT:    addi a0, sp, 16
-; RV64-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; RV64-NEXT:    vle32.v v16, (a1)
 ; RV64-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
 ; RV64-NEXT:    vslidedown.vi v24, v8, 16
-; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    li a1, 24
-; RV64-NEXT:    mul a0, a0, a1
-; RV64-NEXT:    add a0, sp, a0
-; RV64-NEXT:    addi a0, a0, 16
-; RV64-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
-; RV64-NEXT:    vslidedown.vi v24, v16, 16
-; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    slli a0, a0, 3
-; RV64-NEXT:    add a0, sp, a0
-; RV64-NEXT:    addi a0, a0, 16
-; RV64-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV64-NEXT:    addi a0, sp, 16
+; RV64-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT:    vslidedown.vi v0, v16, 16
 ; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    li a1, 24
-; RV64-NEXT:    mul a0, a0, a1
-; RV64-NEXT:    add a0, sp, a0
-; RV64-NEXT:    addi a0, a0, 16
-; RV64-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    slli a0, a0, 3
-; RV64-NEXT:    add a0, sp, a0
-; RV64-NEXT:    addi a0, a0, 16
-; RV64-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT:    vmv4r.v v8, v0
 ; RV64-NEXT:    vwadd.vv v0, v24, v8
 ; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    slli a0, a0, 4
+; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add a0, sp, a0
 ; RV64-NEXT:    addi a0, a0, 16
 ; RV64-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
@@ -1672,7 +1630,7 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
 ; RV64-NEXT:    vwadd.vv v0, v8, v16
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    slli a0, a0, 4
+; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add a0, sp, a0
 ; RV64-NEXT:    addi a0, a0, 16
 ; RV64-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -1681,7 +1639,7 @@ define i64 @vwreduce_add_v64i64(ptr %x) {
 ; RV64-NEXT:    vredsum.vs v8, v8, v16
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 5
+; RV64-NEXT:    slli a1, a1, 4
 ; RV64-NEXT:    add sp, sp, a1
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret
@@ -1697,45 +1655,24 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
 ; RV32-NEXT:    addi sp, sp, -16
 ; RV32-NEXT:    .cfi_def_cfa_offset 16
 ; RV32-NEXT:    csrr a1, vlenb
-; RV32-NEXT:    slli a1, a1, 5
+; RV32-NEXT:    slli a1, a1, 4
 ; RV32-NEXT:    sub sp, sp, a1
-; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
 ; RV32-NEXT:    addi a1, a0, 128
 ; RV32-NEXT:    li a2, 32
 ; RV32-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; RV32-NEXT:    vle32.v v8, (a0)
-; RV32-NEXT:    addi a0, sp, 16
-; RV32-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; RV32-NEXT:    vle32.v v16, (a1)
 ; RV32-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
 ; RV32-NEXT:    vslidedown.vi v24, v8, 16
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    li a1, 24
-; RV32-NEXT:    mul a0, a0, a1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
-; RV32-NEXT:    vslidedown.vi v24, v16, 16
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 3
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT:    vslidedown.vi v0, v16, 16
 ; RV32-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    li a1, 24
-; RV32-NEXT:    mul a0, a0, a1
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 3
-; RV32-NEXT:    add a0, sp, a0
-; RV32-NEXT:    addi a0, a0, 16
-; RV32-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vmv4r.v v8, v0
 ; RV32-NEXT:    vwaddu.vv v0, v24, v8
 ; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add a0, sp, a0
 ; RV32-NEXT:    addi a0, a0, 16
 ; RV32-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
@@ -1744,7 +1681,7 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
 ; RV32-NEXT:    vwaddu.vv v0, v8, v16
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV32-NEXT:    csrr a0, vlenb
-; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    slli a0, a0, 3
 ; RV32-NEXT:    add a0, sp, a0
 ; RV32-NEXT:    addi a0, a0, 16
 ; RV32-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -1756,7 +1693,7 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
 ; RV32-NEXT:    vsrl.vx v8, v8, a2
 ; RV32-NEXT:    vmv.x.s a1, v8
 ; RV32-NEXT:    csrr a2, vlenb
-; RV32-NEXT:    slli a2, a2, 5
+; RV32-NEXT:    slli a2, a2, 4
 ; RV32-NEXT:    add sp, sp, a2
 ; RV32-NEXT:    addi sp, sp, 16
 ; RV32-NEXT:    ret
@@ -1766,45 +1703,24 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
 ; RV64-NEXT:    addi sp, sp, -16
 ; RV64-NEXT:    .cfi_def_cfa_offset 16
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 5
+; RV64-NEXT:    slli a1, a1, 4
 ; RV64-NEXT:    sub sp, sp, a1
-; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb
+; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
 ; RV64-NEXT:    addi a1, a0, 128
 ; RV64-NEXT:    li a2, 32
 ; RV64-NEXT:    vsetvli zero, a2, e32, m8, ta, ma
 ; RV64-NEXT:    vle32.v v8, (a0)
-; RV64-NEXT:    addi a0, sp, 16
-; RV64-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
 ; RV64-NEXT:    vle32.v v16, (a1)
 ; RV64-NEXT:    vsetivli zero, 16, e32, m8, ta, ma
 ; RV64-NEXT:    vslidedown.vi v24, v8, 16
-; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    li a1, 24
-; RV64-NEXT:    mul a0, a0, a1
-; RV64-NEXT:    add a0, sp, a0
-; RV64-NEXT:    addi a0, a0, 16
-; RV64-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
-; RV64-NEXT:    vslidedown.vi v24, v16, 16
-; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    slli a0, a0, 3
-; RV64-NEXT:    add a0, sp, a0
-; RV64-NEXT:    addi a0, a0, 16
-; RV64-NEXT:    vs8r.v v24, (a0) # Unknown-size Folded Spill
+; RV64-NEXT:    addi a0, sp, 16
+; RV64-NEXT:    vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV64-NEXT:    vslidedown.vi v0, v16, 16
 ; RV64-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
-; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    li a1, 24
-; RV64-NEXT:    mul a0, a0, a1
-; RV64-NEXT:    add a0, sp, a0
-; RV64-NEXT:    addi a0, a0, 16
-; RV64-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    slli a0, a0, 3
-; RV64-NEXT:    add a0, sp, a0
-; RV64-NEXT:    addi a0, a0, 16
-; RV64-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT:    vmv4r.v v8, v0
 ; RV64-NEXT:    vwaddu.vv v0, v24, v8
 ; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    slli a0, a0, 4
+; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add a0, sp, a0
 ; RV64-NEXT:    addi a0, a0, 16
 ; RV64-NEXT:    vs8r.v v0, (a0) # Unknown-size Folded Spill
@@ -1813,7 +1729,7 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
 ; RV64-NEXT:    vwaddu.vv v0, v8, v16
 ; RV64-NEXT:    vsetvli zero, zero, e64, m8, ta, ma
 ; RV64-NEXT:    csrr a0, vlenb
-; RV64-NEXT:    slli a0, a0, 4
+; RV64-NEXT:    slli a0, a0, 3
 ; RV64-NEXT:    add a0, sp, a0
 ; RV64-NEXT:    addi a0, a0, 16
 ; RV64-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
@@ -1822,7 +1738,7 @@ define i64 @vwreduce_uadd_v64i64(ptr %x) {
 ; RV64-NEXT:    vredsum.vs v8, v8, v16
 ; RV64-NEXT:    vmv.x.s a0, v8
 ; RV64-NEXT:    csrr a1, vlenb
-; RV64-NEXT:    slli a1, a1, 5
+; RV64-NEXT:    slli a1, a1, 4
 ; RV64-NEXT:    add sp, sp, a1
 ; RV64-NEXT:    addi sp, sp, 16
 ; RV64-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
index 12d7409de82c0..03f1a7e6a932e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/insert-subvector.ll
@@ -367,7 +367,6 @@ define <vscale x 32 x half> @insert_nxv32f16_nxv2f16_26(<vscale x 32 x half> %ve
 define <vscale x 32 x half> @insert_nxv32f16_undef_nxv1f16_0(<vscale x 1 x half> %subvec) {
 ; CHECK-LABEL: insert_nxv32f16_undef_nxv1f16_0:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8m8
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 0)
   ret <vscale x 32 x half> %v
@@ -381,8 +380,7 @@ define <vscale x 32 x half> @insert_nxv32f16_undef_nxv1f16_26(<vscale x 1 x half
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    add a1, a0, a1
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vslideup.vx v22, v8, a0
-; CHECK-NEXT:    vmv8r.v v8, v16
+; CHECK-NEXT:    vslideup.vx v14, v8, a0
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vector.insert.nxv1f16.nxv32f16(<vscale x 32 x half> undef, <vscale x 1 x half> %subvec, i64 26)
   ret <vscale x 32 x half> %v

diff --git a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
index c88dfabc34eb3..b22f483ce90ca 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv32-spill-zvlsseg.ll
@@ -45,7 +45,6 @@ define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(ptr %base, i32 %vl) nounwind {
 ; SPILL-O2-NEXT:    vl1r.v v7, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT:    add a0, a0, a1
 ; SPILL-O2-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; SPILL-O2-NEXT:    csrr a0, vlenb
 ; SPILL-O2-NEXT:    slli a0, a0, 1
 ; SPILL-O2-NEXT:    add sp, sp, a0
@@ -100,7 +99,6 @@ define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(ptr %base, i32 %vl) nounwind {
 ; SPILL-O2-NEXT:    vl1r.v v7, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT:    add a0, a0, a1
 ; SPILL-O2-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; SPILL-O2-NEXT:    csrr a0, vlenb
 ; SPILL-O2-NEXT:    slli a0, a0, 1
 ; SPILL-O2-NEXT:    add sp, sp, a0
@@ -158,7 +156,6 @@ define <vscale x 4 x i32> @spill_zvlsseg_nxv4i32(ptr %base, i32 %vl) nounwind {
 ; SPILL-O2-NEXT:    vl2r.v v6, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT:    add a0, a0, a1
 ; SPILL-O2-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; SPILL-O2-NEXT:    csrr a0, vlenb
 ; SPILL-O2-NEXT:    slli a0, a0, 2
 ; SPILL-O2-NEXT:    add sp, sp, a0
@@ -216,7 +213,6 @@ define <vscale x 8 x i32> @spill_zvlsseg_nxv8i32(ptr %base, i32 %vl) nounwind {
 ; SPILL-O2-NEXT:    vl4r.v v4, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT:    add a0, a0, a1
 ; SPILL-O2-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; SPILL-O2-NEXT:    csrr a0, vlenb
 ; SPILL-O2-NEXT:    slli a0, a0, 3
 ; SPILL-O2-NEXT:    add sp, sp, a0
@@ -279,7 +275,6 @@ define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(ptr %base, i32 %vl) nounwind {
 ; SPILL-O2-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT:    add a0, a0, a1
 ; SPILL-O2-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; SPILL-O2-NEXT:    csrr a0, vlenb
 ; SPILL-O2-NEXT:    li a1, 6
 ; SPILL-O2-NEXT:    mul a0, a0, a1

diff --git a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
index 37f87fecc23fa..32e0060322d36 100644
--- a/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/rv64-spill-zvlsseg.ll
@@ -45,7 +45,6 @@ define <vscale x 1 x i32> @spill_zvlsseg_nxv1i32(ptr %base, i64 %vl) nounwind {
 ; SPILL-O2-NEXT:    vl1r.v v7, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT:    add a0, a0, a1
 ; SPILL-O2-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; SPILL-O2-NEXT:    csrr a0, vlenb
 ; SPILL-O2-NEXT:    slli a0, a0, 1
 ; SPILL-O2-NEXT:    add sp, sp, a0
@@ -100,7 +99,6 @@ define <vscale x 2 x i32> @spill_zvlsseg_nxv2i32(ptr %base, i64 %vl) nounwind {
 ; SPILL-O2-NEXT:    vl1r.v v7, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT:    add a0, a0, a1
 ; SPILL-O2-NEXT:    vl1r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; SPILL-O2-NEXT:    csrr a0, vlenb
 ; SPILL-O2-NEXT:    slli a0, a0, 1
 ; SPILL-O2-NEXT:    add sp, sp, a0
@@ -158,7 +156,6 @@ define <vscale x 4 x i32> @spill_zvlsseg_nxv4i32(ptr %base, i64 %vl) nounwind {
 ; SPILL-O2-NEXT:    vl2r.v v6, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT:    add a0, a0, a1
 ; SPILL-O2-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; SPILL-O2-NEXT:    csrr a0, vlenb
 ; SPILL-O2-NEXT:    slli a0, a0, 2
 ; SPILL-O2-NEXT:    add sp, sp, a0
@@ -216,7 +213,6 @@ define <vscale x 8 x i32> @spill_zvlsseg_nxv8i32(ptr %base, i64 %vl) nounwind {
 ; SPILL-O2-NEXT:    vl4r.v v4, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT:    add a0, a0, a1
 ; SPILL-O2-NEXT:    vl4r.v v8, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; SPILL-O2-NEXT:    csrr a0, vlenb
 ; SPILL-O2-NEXT:    slli a0, a0, 3
 ; SPILL-O2-NEXT:    add sp, sp, a0
@@ -279,7 +275,6 @@ define <vscale x 4 x i32> @spill_zvlsseg3_nxv4i32(ptr %base, i64 %vl) nounwind {
 ; SPILL-O2-NEXT:    vl2r.v v8, (a0) # Unknown-size Folded Reload
 ; SPILL-O2-NEXT:    add a0, a0, a1
 ; SPILL-O2-NEXT:    vl2r.v v10, (a0) # Unknown-size Folded Reload
-; SPILL-O2-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; SPILL-O2-NEXT:    csrr a0, vlenb
 ; SPILL-O2-NEXT:    li a1, 6
 ; SPILL-O2-NEXT:    mul a0, a0, a1

diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
index 81675626bb055..efcfd326f129d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-fixed.ll
@@ -298,10 +298,9 @@ define {<2 x i64>, <2 x i64>} @vector_deinterleave_v2i64_v4i64(<4 x i64> %vec) {
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vrgather.vi v10, v8, 0
 ; CHECK-NEXT:    vrgather.vi v10, v12, 0, v0.t
-; CHECK-NEXT:    vrgather.vi v11, v8, 1
-; CHECK-NEXT:    vrgather.vi v11, v12, 1, v0.t
+; CHECK-NEXT:    vrgather.vi v9, v8, 1
+; CHECK-NEXT:    vrgather.vi v9, v12, 1, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    vmv.v.v v9, v11
 ; CHECK-NEXT:    ret
 %retval = call {<2 x i64>, <2 x i64>} @llvm.experimental.vector.deinterleave2.v4i64(<4 x i64> %vec)
 ret {<2 x i64>, <2 x i64>} %retval
@@ -388,10 +387,9 @@ define {<2 x double>, <2 x double>} @vector_deinterleave_v2f64_v4f64(<4 x double
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-NEXT:    vrgather.vi v10, v8, 0
 ; CHECK-NEXT:    vrgather.vi v10, v12, 0, v0.t
-; CHECK-NEXT:    vrgather.vi v11, v8, 1
-; CHECK-NEXT:    vrgather.vi v11, v12, 1, v0.t
+; CHECK-NEXT:    vrgather.vi v9, v8, 1
+; CHECK-NEXT:    vrgather.vi v9, v12, 1, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
-; CHECK-NEXT:    vmv.v.v v9, v11
 ; CHECK-NEXT:    ret
 %retval = call {<2 x double>, <2 x double>} @llvm.experimental.vector.deinterleave2.v4f64(<4 x double> %vec)
 ret {<2 x double>, <2 x double>} %retval

diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
index 38d5196a82c75..914782e9839fd 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-fixed.ll
@@ -284,7 +284,6 @@ define <4 x i64> @vector_interleave_v4i64_v2i64(<2 x i64> %a, <2 x i64> %b) {
 ; RV32-LABEL: vector_interleave_v4i64_v2i64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vmv1r.v v10, v9
-; RV32-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; RV32-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; RV32-NEXT:    vmv.v.i v12, 0
 ; RV32-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
@@ -301,7 +300,6 @@ define <4 x i64> @vector_interleave_v4i64_v2i64(<2 x i64> %a, <2 x i64> %b) {
 ; RV64-LABEL: vector_interleave_v4i64_v2i64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vmv1r.v v10, v9
-; RV64-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vmv.v.i v12, 0
 ; RV64-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
@@ -394,7 +392,6 @@ define <4 x double> @vector_interleave_v4f64_v2f64(<2 x double> %a, <2 x double>
 ; RV32-LABEL: vector_interleave_v4f64_v2f64:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vmv1r.v v10, v9
-; RV32-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV32-NEXT:    vmv.v.i v12, 0
 ; RV32-NEXT:    vsetivli zero, 2, e64, m2, tu, ma
@@ -411,7 +408,6 @@ define <4 x double> @vector_interleave_v4f64_v2f64(<2 x double> %a, <2 x double>
 ; RV64-LABEL: vector_interleave_v4f64_v2f64:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vmv1r.v v10, v9
-; RV64-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
 ; RV64-NEXT:    vmv.v.i v12, 0
 ; RV64-NEXT:    vsetivli zero, 2, e64, m2, tu, ma

diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
index fb70a1ced5172..f6e6de24af91f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave.ll
@@ -60,7 +60,6 @@ define <vscale x 8 x i32> @vector_interleave_nxv8i32_nxv4i32(<vscale x 4 x i32>
 define <vscale x 4 x i64> @vector_interleave_nxv4i64_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i64> %b) {
 ; CHECK-LABEL: vector_interleave_nxv4i64_nxv2i64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v10m2 killed $v10m2 killed $v8m4 def $v8m4
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
@@ -70,7 +69,6 @@ define <vscale x 4 x i64> @vector_interleave_nxv4i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK-NEXT:    vsrl.vi v16, v12, 1
 ; CHECK-NEXT:    vadd.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m4 def $v8m4
 ; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret
@@ -160,7 +158,6 @@ define <vscale x 8 x float> @vector_interleave_nxv8f32_nxv4f32(<vscale x 4 x flo
 define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x double> %a, <vscale x 2 x double> %b) {
 ; CHECK-LABEL: vector_interleave_nxv4f64_nxv2f64:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v10m2 killed $v10m2 killed $v8m4 def $v8m4
 ; CHECK-NEXT:    csrr a0, vlenb
 ; CHECK-NEXT:    srli a0, a0, 2
 ; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
@@ -170,7 +167,6 @@ define <vscale x 4 x double> @vector_interleave_nxv4f64_nxv2f64(<vscale x 2 x do
 ; CHECK-NEXT:    vsrl.vi v16, v12, 1
 ; CHECK-NEXT:    vadd.vx v16, v16, a0, v0.t
 ; CHECK-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m4 def $v8m4
 ; CHECK-NEXT:    vrgatherei16.vv v12, v8, v16, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v12
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
index f51945a8617b2..c8859a358fcae 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptosi-vp.ll
@@ -314,15 +314,7 @@ declare <vscale x 32 x i16> @llvm.vp.fptosi.nxv32i16.nxv32f32(<vscale x 32 x flo
 define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptosi_nxv32i16_nxv32f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
 ; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    srli a2, a1, 2
 ; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
@@ -333,20 +325,15 @@ define <vscale x 32 x i16> @vfptosi_nxv32i16_nxv32f32(<vscale x 32 x float> %va,
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v12, v16, v0.t
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v28, v16, v0.t
 ; CHECK-NEXT:    bltu a0, a1, .LBB25_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a1
 ; CHECK-NEXT:  .LBB25_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vfncvt.rtz.x.f.w v8, v16, v0.t
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vfncvt.rtz.x.f.w v24, v8, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x i16> @llvm.vp.fptosi.nxv32i16.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
index cf8ace03d576e..9ef1dd3634e9f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptoui-vp.ll
@@ -314,15 +314,7 @@ declare <vscale x 32 x i16> @llvm.vp.fptoui.nxv32i16.nxv32f32(<vscale x 32 x flo
 define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vfptoui_nxv32i16_nxv32f32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
 ; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    srli a2, a1, 2
 ; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
@@ -333,20 +325,15 @@ define <vscale x 32 x i16> @vfptoui_nxv32i16_nxv32f32(<vscale x 32 x float> %va,
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v12, v16, v0.t
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v28, v16, v0.t
 ; CHECK-NEXT:    bltu a0, a1, .LBB25_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a1
 ; CHECK-NEXT:  .LBB25_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vfncvt.rtz.xu.f.w v8, v16, v0.t
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vfncvt.rtz.xu.f.w v24, v8, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x i16> @llvm.vp.fptoui.nxv32i16.nxv32f32(<vscale x 32 x float> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x i16> %v

diff --git a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
index ec2eeaa8327b6..86b9a6c694ea2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vfptrunc-vp.ll
@@ -140,29 +140,25 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    li a3, 24
-; CHECK-NEXT:    mul a1, a1, a3
+; CHECK-NEXT:    slli a1, a1, 4
 ; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
 ; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    add a1, sp, a1
-; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    addi a1, sp, 16
 ; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    slli a1, a1, 3
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 16
 ; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    srli a3, a1, 2
 ; CHECK-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v25, v0, a3
+; CHECK-NEXT:    vslidedown.vx v16, v0, a3
 ; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    slli a3, a1, 3
 ; CHECK-NEXT:    add a3, a0, a3
-; CHECK-NEXT:    vl8re64.v v8, (a3)
+; CHECK-NEXT:    vl8re64.v v24, (a3)
 ; CHECK-NEXT:    slli a3, a1, 1
 ; CHECK-NEXT:    sub a4, a2, a3
 ; CHECK-NEXT:    sltu a5, a2, a4
@@ -173,22 +169,18 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
 ; CHECK-NEXT:    addi a6, a6, -1
 ; CHECK-NEXT:    and a6, a6, a5
 ; CHECK-NEXT:    srli a5, a1, 3
-; CHECK-NEXT:    vl8re64.v v16, (a0)
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT:    vslidedown.vx v0, v25, a5
+; CHECK-NEXT:    vl8re64.v v8, (a0)
+; CHECK-NEXT:    vslidedown.vx v0, v16, a5
 ; CHECK-NEXT:    vsetvli zero, a6, e32, m4, ta, ma
-; CHECK-NEXT:    vfncvt.f.f.w v20, v8, v0.t
+; CHECK-NEXT:    vfncvt.f.f.w v20, v24, v0.t
 ; CHECK-NEXT:    bltu a4, a1, .LBB8_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a4, a1
 ; CHECK-NEXT:  .LBB8_2:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v26, v1, a5
+; CHECK-NEXT:    vslidedown.vx v2, v1, a5
 ; CHECK-NEXT:    vsetvli zero, a4, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vfncvt.f.f.w v16, v8, v0.t
 ; CHECK-NEXT:    bltu a2, a3, .LBB8_4
 ; CHECK-NEXT:  # %bb.3:
@@ -199,13 +191,10 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a0, a3, a0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v26
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vfncvt.f.f.w v12, v24, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v2
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vfncvt.f.f.w v28, v8, v0.t
 ; CHECK-NEXT:    bltu a2, a1, .LBB8_6
 ; CHECK-NEXT:  # %bb.5:
 ; CHECK-NEXT:    mv a2, a1
@@ -213,14 +202,14 @@ define <vscale x 32 x float> @vfptrunc_nxv32f32_nxv32f64(<vscale x 32 x double>
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vfncvt.f.f.w v8, v24, v0.t
+; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vfncvt.f.f.w v24, v8, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 24
-; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll
index 8a5ddcf7eef71..ec4d954ed5fc2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv32.ll
@@ -24,7 +24,6 @@ define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -54,7 +53,6 @@ define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -84,7 +82,6 @@ define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -114,7 +111,6 @@ define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -144,7 +140,6 @@ define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -174,7 +169,6 @@ define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -201,12 +195,11 @@ entry:
 define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -233,12 +226,11 @@ entry:
 define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -265,12 +257,11 @@ entry:
 define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -298,9 +289,9 @@ define <vscale x 1 x i8> @test_vloxseg4_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -331,9 +322,9 @@ define <vscale x 1 x i8> @test_vloxseg4_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -364,9 +355,9 @@ define <vscale x 1 x i8> @test_vloxseg4_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -397,10 +388,10 @@ define <vscale x 1 x i8> @test_vloxseg5_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -431,10 +422,10 @@ define <vscale x 1 x i8> @test_vloxseg5_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -465,10 +456,10 @@ define <vscale x 1 x i8> @test_vloxseg5_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -499,11 +490,11 @@ define <vscale x 1 x i8> @test_vloxseg6_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -534,11 +525,11 @@ define <vscale x 1 x i8> @test_vloxseg6_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -569,11 +560,11 @@ define <vscale x 1 x i8> @test_vloxseg6_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -604,12 +595,12 @@ define <vscale x 1 x i8> @test_vloxseg7_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -640,12 +631,12 @@ define <vscale x 1 x i8> @test_vloxseg7_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -676,12 +667,12 @@ define <vscale x 1 x i8> @test_vloxseg7_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -712,13 +703,13 @@ define <vscale x 1 x i8> @test_vloxseg8_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -749,13 +740,13 @@ define <vscale x 1 x i8> @test_vloxseg8_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -786,13 +777,13 @@ define <vscale x 1 x i8> @test_vloxseg8_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -825,7 +816,6 @@ define <vscale x 16 x i8> @test_vloxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -855,7 +845,6 @@ define <vscale x 16 x i8> @test_vloxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -885,7 +874,6 @@ define <vscale x 16 x i8> @test_vloxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -913,10 +901,9 @@ define <vscale x 16 x i8> @test_vloxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8
 ; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -943,12 +930,11 @@ entry:
 define <vscale x 16 x i8> @test_vloxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -976,10 +962,9 @@ define <vscale x 16 x i8> @test_vloxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8
 ; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -1006,13 +991,12 @@ entry:
 define <vscale x 16 x i8> @test_vloxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vloxseg4ei16.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -1040,9 +1024,9 @@ define <vscale x 16 x i8> @test_vloxseg4_mask_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -1073,11 +1057,10 @@ define <vscale x 16 x i8> @test_vloxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8
 ; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -1107,7 +1090,6 @@ define <vscale x 2 x i32> @test_vloxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -1137,7 +1119,6 @@ define <vscale x 2 x i32> @test_vloxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -1167,7 +1148,6 @@ define <vscale x 2 x i32> @test_vloxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -1194,12 +1174,11 @@ entry:
 define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -1226,12 +1205,11 @@ entry:
 define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -1258,12 +1236,11 @@ entry:
 define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -1291,9 +1268,9 @@ define <vscale x 2 x i32> @test_vloxseg4_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1324,9 +1301,9 @@ define <vscale x 2 x i32> @test_vloxseg4_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1357,9 +1334,9 @@ define <vscale x 2 x i32> @test_vloxseg4_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1390,10 +1367,10 @@ define <vscale x 2 x i32> @test_vloxseg5_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1424,10 +1401,10 @@ define <vscale x 2 x i32> @test_vloxseg5_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1458,10 +1435,10 @@ define <vscale x 2 x i32> @test_vloxseg5_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1492,11 +1469,11 @@ define <vscale x 2 x i32> @test_vloxseg6_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1527,11 +1504,11 @@ define <vscale x 2 x i32> @test_vloxseg6_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1562,11 +1539,11 @@ define <vscale x 2 x i32> @test_vloxseg6_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1597,12 +1574,12 @@ define <vscale x 2 x i32> @test_vloxseg7_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1633,12 +1610,12 @@ define <vscale x 2 x i32> @test_vloxseg7_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1669,12 +1646,12 @@ define <vscale x 2 x i32> @test_vloxseg7_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1705,13 +1682,13 @@ define <vscale x 2 x i32> @test_vloxseg8_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1742,13 +1719,13 @@ define <vscale x 2 x i32> @test_vloxseg8_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1779,13 +1756,13 @@ define <vscale x 2 x i32> @test_vloxseg8_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1818,7 +1795,6 @@ define <vscale x 4 x i16> @test_vloxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1848,7 +1824,6 @@ define <vscale x 4 x i16> @test_vloxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1878,7 +1853,6 @@ define <vscale x 4 x i16> @test_vloxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1905,12 +1879,11 @@ entry:
 define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1937,12 +1910,11 @@ entry:
 define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1970,10 +1942,9 @@ define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -2001,9 +1972,9 @@ define <vscale x 4 x i16> @test_vloxseg4_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2034,9 +2005,9 @@ define <vscale x 4 x i16> @test_vloxseg4_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2066,13 +2037,12 @@ entry:
 define <vscale x 4 x i16> @test_vloxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vloxseg4ei32.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -2100,10 +2070,10 @@ define <vscale x 4 x i16> @test_vloxseg5_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2134,10 +2104,10 @@ define <vscale x 4 x i16> @test_vloxseg5_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2168,10 +2138,10 @@ define <vscale x 4 x i16> @test_vloxseg5_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -2202,11 +2172,11 @@ define <vscale x 4 x i16> @test_vloxseg6_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2237,11 +2207,11 @@ define <vscale x 4 x i16> @test_vloxseg6_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2272,11 +2242,11 @@ define <vscale x 4 x i16> @test_vloxseg6_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -2307,12 +2277,12 @@ define <vscale x 4 x i16> @test_vloxseg7_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2343,12 +2313,12 @@ define <vscale x 4 x i16> @test_vloxseg7_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2379,12 +2349,12 @@ define <vscale x 4 x i16> @test_vloxseg7_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -2415,13 +2385,13 @@ define <vscale x 4 x i16> @test_vloxseg8_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2452,13 +2422,13 @@ define <vscale x 4 x i16> @test_vloxseg8_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2489,13 +2459,13 @@ define <vscale x 4 x i16> @test_vloxseg8_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -2528,7 +2498,6 @@ define <vscale x 1 x i32> @test_vloxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2558,7 +2527,6 @@ define <vscale x 1 x i32> @test_vloxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2588,7 +2556,6 @@ define <vscale x 1 x i32> @test_vloxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2615,12 +2582,11 @@ entry:
 define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2647,12 +2613,11 @@ entry:
 define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2679,12 +2644,11 @@ entry:
 define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2712,9 +2676,9 @@ define <vscale x 1 x i32> @test_vloxseg4_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2745,9 +2709,9 @@ define <vscale x 1 x i32> @test_vloxseg4_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2778,9 +2742,9 @@ define <vscale x 1 x i32> @test_vloxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2811,10 +2775,10 @@ define <vscale x 1 x i32> @test_vloxseg5_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2845,10 +2809,10 @@ define <vscale x 1 x i32> @test_vloxseg5_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2879,10 +2843,10 @@ define <vscale x 1 x i32> @test_vloxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2913,11 +2877,11 @@ define <vscale x 1 x i32> @test_vloxseg6_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2948,11 +2912,11 @@ define <vscale x 1 x i32> @test_vloxseg6_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2983,11 +2947,11 @@ define <vscale x 1 x i32> @test_vloxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3018,12 +2982,12 @@ define <vscale x 1 x i32> @test_vloxseg7_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3054,12 +3018,12 @@ define <vscale x 1 x i32> @test_vloxseg7_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3090,12 +3054,12 @@ define <vscale x 1 x i32> @test_vloxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3126,13 +3090,13 @@ define <vscale x 1 x i32> @test_vloxseg8_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3163,13 +3127,13 @@ define <vscale x 1 x i32> @test_vloxseg8_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3200,13 +3164,13 @@ define <vscale x 1 x i32> @test_vloxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3239,7 +3203,6 @@ define <vscale x 8 x i16> @test_vloxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3269,7 +3232,6 @@ define <vscale x 8 x i16> @test_vloxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3299,7 +3261,6 @@ define <vscale x 8 x i16> @test_vloxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3326,12 +3287,11 @@ entry:
 define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3358,12 +3318,11 @@ entry:
 define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3391,10 +3350,9 @@ define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3422,9 +3380,9 @@ define <vscale x 8 x i16> @test_vloxseg4_mask_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -3455,9 +3413,9 @@ define <vscale x 8 x i16> @test_vloxseg4_mask_nxv8i16_nxv8i8(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -3487,13 +3445,12 @@ entry:
 define <vscale x 8 x i16> @test_vloxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vloxseg4ei32.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3523,7 +3480,6 @@ define <vscale x 8 x i8> @test_vloxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3553,7 +3509,6 @@ define <vscale x 8 x i8> @test_vloxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3583,7 +3538,6 @@ define <vscale x 8 x i8> @test_vloxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3611,10 +3565,9 @@ define <vscale x 8 x i8> @test_vloxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3641,12 +3594,11 @@ entry:
 define <vscale x 8 x i8> @test_vloxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3674,10 +3626,9 @@ define <vscale x 8 x i8> @test_vloxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3704,13 +3655,12 @@ entry:
 define <vscale x 8 x i8> @test_vloxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vloxseg4ei16.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3738,9 +3688,9 @@ define <vscale x 8 x i8> @test_vloxseg4_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3771,11 +3721,10 @@ define <vscale x 8 x i8> @test_vloxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3803,10 +3752,10 @@ define <vscale x 8 x i8> @test_vloxseg5_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -3837,10 +3786,10 @@ define <vscale x 8 x i8> @test_vloxseg5_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3871,12 +3820,11 @@ define <vscale x 8 x i8> @test_vloxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3904,11 +3852,11 @@ define <vscale x 8 x i8> @test_vloxseg6_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -3939,11 +3887,11 @@ define <vscale x 8 x i8> @test_vloxseg6_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3973,15 +3921,14 @@ entry:
 define <vscale x 8 x i8> @test_vloxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
+; CHECK-NEXT:    vloxseg6ei32.v v7, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -4009,12 +3956,12 @@ define <vscale x 8 x i8> @test_vloxseg7_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -4045,12 +3992,12 @@ define <vscale x 8 x i8> @test_vloxseg7_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4081,12 +4028,12 @@ define <vscale x 8 x i8> @test_vloxseg7_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -4117,13 +4064,13 @@ define <vscale x 8 x i8> @test_vloxseg8_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -4154,13 +4101,13 @@ define <vscale x 8 x i8> @test_vloxseg8_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4191,13 +4138,13 @@ define <vscale x 8 x i8> @test_vloxseg8_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -4230,7 +4177,6 @@ define <vscale x 8 x i32> @test_vloxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -4260,7 +4206,6 @@ define <vscale x 8 x i32> @test_vloxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -4290,7 +4235,6 @@ define <vscale x 8 x i32> @test_vloxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -4320,7 +4264,6 @@ define <vscale x 4 x i8> @test_vloxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4350,7 +4293,6 @@ define <vscale x 4 x i8> @test_vloxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4380,7 +4322,6 @@ define <vscale x 4 x i8> @test_vloxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4407,12 +4348,11 @@ entry:
 define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4439,12 +4379,11 @@ entry:
 define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4472,10 +4411,9 @@ define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4503,9 +4441,9 @@ define <vscale x 4 x i8> @test_vloxseg4_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4536,9 +4474,9 @@ define <vscale x 4 x i8> @test_vloxseg4_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4568,13 +4506,12 @@ entry:
 define <vscale x 4 x i8> @test_vloxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vloxseg4ei32.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4602,10 +4539,10 @@ define <vscale x 4 x i8> @test_vloxseg5_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4636,10 +4573,10 @@ define <vscale x 4 x i8> @test_vloxseg5_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4670,10 +4607,10 @@ define <vscale x 4 x i8> @test_vloxseg5_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -4704,11 +4641,11 @@ define <vscale x 4 x i8> @test_vloxseg6_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4739,11 +4676,11 @@ define <vscale x 4 x i8> @test_vloxseg6_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4774,11 +4711,11 @@ define <vscale x 4 x i8> @test_vloxseg6_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -4809,12 +4746,12 @@ define <vscale x 4 x i8> @test_vloxseg7_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4845,12 +4782,12 @@ define <vscale x 4 x i8> @test_vloxseg7_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4881,12 +4818,12 @@ define <vscale x 4 x i8> @test_vloxseg7_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -4917,13 +4854,13 @@ define <vscale x 4 x i8> @test_vloxseg8_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4954,13 +4891,13 @@ define <vscale x 4 x i8> @test_vloxseg8_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4991,13 +4928,13 @@ define <vscale x 4 x i8> @test_vloxseg8_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -5030,7 +4967,6 @@ define <vscale x 1 x i16> @test_vloxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -5060,7 +4996,6 @@ define <vscale x 1 x i16> @test_vloxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -5090,7 +5025,6 @@ define <vscale x 1 x i16> @test_vloxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -5117,12 +5051,11 @@ entry:
 define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -5149,12 +5082,11 @@ entry:
 define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -5181,12 +5113,11 @@ entry:
 define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -5214,9 +5145,9 @@ define <vscale x 1 x i16> @test_vloxseg4_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5247,9 +5178,9 @@ define <vscale x 1 x i16> @test_vloxseg4_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5280,9 +5211,9 @@ define <vscale x 1 x i16> @test_vloxseg4_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5313,10 +5244,10 @@ define <vscale x 1 x i16> @test_vloxseg5_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5347,10 +5278,10 @@ define <vscale x 1 x i16> @test_vloxseg5_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5381,10 +5312,10 @@ define <vscale x 1 x i16> @test_vloxseg5_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5415,11 +5346,11 @@ define <vscale x 1 x i16> @test_vloxseg6_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5450,11 +5381,11 @@ define <vscale x 1 x i16> @test_vloxseg6_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5485,11 +5416,11 @@ define <vscale x 1 x i16> @test_vloxseg6_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5520,12 +5451,12 @@ define <vscale x 1 x i16> @test_vloxseg7_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5556,12 +5487,12 @@ define <vscale x 1 x i16> @test_vloxseg7_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5592,12 +5523,12 @@ define <vscale x 1 x i16> @test_vloxseg7_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5628,13 +5559,13 @@ define <vscale x 1 x i16> @test_vloxseg8_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5665,13 +5596,13 @@ define <vscale x 1 x i16> @test_vloxseg8_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5702,13 +5633,13 @@ define <vscale x 1 x i16> @test_vloxseg8_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5741,7 +5672,6 @@ define <vscale x 32 x i8> @test_vloxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
@@ -5771,7 +5701,6 @@ define <vscale x 32 x i8> @test_vloxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
@@ -5801,7 +5730,6 @@ define <vscale x 2 x i8> @test_vloxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5831,7 +5759,6 @@ define <vscale x 2 x i8> @test_vloxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5861,7 +5788,6 @@ define <vscale x 2 x i8> @test_vloxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5888,12 +5814,11 @@ entry:
 define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5920,12 +5845,11 @@ entry:
 define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5952,12 +5876,11 @@ entry:
 define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5985,9 +5908,9 @@ define <vscale x 2 x i8> @test_vloxseg4_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6018,9 +5941,9 @@ define <vscale x 2 x i8> @test_vloxseg4_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6051,9 +5974,9 @@ define <vscale x 2 x i8> @test_vloxseg4_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6084,10 +6007,10 @@ define <vscale x 2 x i8> @test_vloxseg5_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6118,10 +6041,10 @@ define <vscale x 2 x i8> @test_vloxseg5_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6152,10 +6075,10 @@ define <vscale x 2 x i8> @test_vloxseg5_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6186,11 +6109,11 @@ define <vscale x 2 x i8> @test_vloxseg6_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6221,11 +6144,11 @@ define <vscale x 2 x i8> @test_vloxseg6_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6256,11 +6179,11 @@ define <vscale x 2 x i8> @test_vloxseg6_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6291,12 +6214,12 @@ define <vscale x 2 x i8> @test_vloxseg7_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6327,12 +6250,12 @@ define <vscale x 2 x i8> @test_vloxseg7_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6363,12 +6286,12 @@ define <vscale x 2 x i8> @test_vloxseg7_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6399,13 +6322,13 @@ define <vscale x 2 x i8> @test_vloxseg8_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6436,13 +6359,13 @@ define <vscale x 2 x i8> @test_vloxseg8_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6473,13 +6396,13 @@ define <vscale x 2 x i8> @test_vloxseg8_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6512,7 +6435,6 @@ define <vscale x 2 x i16> @test_vloxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -6542,7 +6464,6 @@ define <vscale x 2 x i16> @test_vloxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -6572,7 +6493,6 @@ define <vscale x 2 x i16> @test_vloxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -6599,12 +6519,11 @@ entry:
 define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -6631,12 +6550,11 @@ entry:
 define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -6663,12 +6581,11 @@ entry:
 define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -6696,9 +6613,9 @@ define <vscale x 2 x i16> @test_vloxseg4_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6729,9 +6646,9 @@ define <vscale x 2 x i16> @test_vloxseg4_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6762,9 +6679,9 @@ define <vscale x 2 x i16> @test_vloxseg4_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6795,10 +6712,10 @@ define <vscale x 2 x i16> @test_vloxseg5_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6829,10 +6746,10 @@ define <vscale x 2 x i16> @test_vloxseg5_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6863,10 +6780,10 @@ define <vscale x 2 x i16> @test_vloxseg5_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6897,11 +6814,11 @@ define <vscale x 2 x i16> @test_vloxseg6_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6932,11 +6849,11 @@ define <vscale x 2 x i16> @test_vloxseg6_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6967,11 +6884,11 @@ define <vscale x 2 x i16> @test_vloxseg6_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7002,12 +6919,12 @@ define <vscale x 2 x i16> @test_vloxseg7_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7038,12 +6955,12 @@ define <vscale x 2 x i16> @test_vloxseg7_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7074,12 +6991,12 @@ define <vscale x 2 x i16> @test_vloxseg7_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7110,13 +7027,13 @@ define <vscale x 2 x i16> @test_vloxseg8_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7147,13 +7064,13 @@ define <vscale x 2 x i16> @test_vloxseg8_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7184,13 +7101,13 @@ define <vscale x 2 x i16> @test_vloxseg8_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7223,7 +7140,6 @@ define <vscale x 4 x i32> @test_vloxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7253,7 +7169,6 @@ define <vscale x 4 x i32> @test_vloxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7283,7 +7198,6 @@ define <vscale x 4 x i32> @test_vloxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7310,12 +7224,11 @@ entry:
 define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7342,12 +7255,11 @@ entry:
 define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7374,12 +7286,11 @@ entry:
 define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei32.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7407,9 +7318,9 @@ define <vscale x 4 x i32> @test_vloxseg4_mask_nxv4i32_nxv4i16(<vscale x 4 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -7440,9 +7351,9 @@ define <vscale x 4 x i32> @test_vloxseg4_mask_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -7473,9 +7384,9 @@ define <vscale x 4 x i32> @test_vloxseg4_mask_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -7508,7 +7419,6 @@ define <vscale x 16 x half> @test_vloxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -7538,7 +7448,6 @@ define <vscale x 16 x half> @test_vloxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -7568,7 +7477,6 @@ define <vscale x 16 x half> @test_vloxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -7598,7 +7506,6 @@ define <vscale x 4 x double> @test_vloxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x d
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7628,7 +7535,6 @@ define <vscale x 4 x double> @test_vloxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x do
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7658,7 +7564,6 @@ define <vscale x 4 x double> @test_vloxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x d
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7688,7 +7593,6 @@ define <vscale x 1 x double> @test_vloxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -7718,7 +7622,6 @@ define <vscale x 1 x double> @test_vloxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -7748,7 +7651,6 @@ define <vscale x 1 x double> @test_vloxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -7775,12 +7677,11 @@ entry:
 define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -7807,12 +7708,11 @@ entry:
 define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -7839,12 +7739,11 @@ entry:
 define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -7872,9 +7771,9 @@ define <vscale x 1 x double> @test_vloxseg4_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7905,9 +7804,9 @@ define <vscale x 1 x double> @test_vloxseg4_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7938,9 +7837,9 @@ define <vscale x 1 x double> @test_vloxseg4_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7971,10 +7870,10 @@ define <vscale x 1 x double> @test_vloxseg5_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8005,10 +7904,10 @@ define <vscale x 1 x double> @test_vloxseg5_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8039,10 +7938,10 @@ define <vscale x 1 x double> @test_vloxseg5_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8073,11 +7972,11 @@ define <vscale x 1 x double> @test_vloxseg6_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8108,11 +8007,11 @@ define <vscale x 1 x double> @test_vloxseg6_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8143,11 +8042,11 @@ define <vscale x 1 x double> @test_vloxseg6_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8178,12 +8077,12 @@ define <vscale x 1 x double> @test_vloxseg7_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8214,12 +8113,12 @@ define <vscale x 1 x double> @test_vloxseg7_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8250,12 +8149,12 @@ define <vscale x 1 x double> @test_vloxseg7_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8286,13 +8185,13 @@ define <vscale x 1 x double> @test_vloxseg8_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8323,13 +8222,13 @@ define <vscale x 1 x double> @test_vloxseg8_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8360,13 +8259,13 @@ define <vscale x 1 x double> @test_vloxseg8_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8399,7 +8298,6 @@ define <vscale x 2 x float> @test_vloxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -8429,7 +8327,6 @@ define <vscale x 2 x float> @test_vloxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -8459,7 +8356,6 @@ define <vscale x 2 x float> @test_vloxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -8486,12 +8382,11 @@ entry:
 define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -8518,12 +8413,11 @@ entry:
 define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -8550,12 +8444,11 @@ entry:
 define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -8583,9 +8476,9 @@ define <vscale x 2 x float> @test_vloxseg4_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8616,9 +8509,9 @@ define <vscale x 2 x float> @test_vloxseg4_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8649,9 +8542,9 @@ define <vscale x 2 x float> @test_vloxseg4_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8682,10 +8575,10 @@ define <vscale x 2 x float> @test_vloxseg5_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8716,10 +8609,10 @@ define <vscale x 2 x float> @test_vloxseg5_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8750,10 +8643,10 @@ define <vscale x 2 x float> @test_vloxseg5_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8784,11 +8677,11 @@ define <vscale x 2 x float> @test_vloxseg6_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8819,11 +8712,11 @@ define <vscale x 2 x float> @test_vloxseg6_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8854,11 +8747,11 @@ define <vscale x 2 x float> @test_vloxseg6_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8889,12 +8782,12 @@ define <vscale x 2 x float> @test_vloxseg7_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8925,12 +8818,12 @@ define <vscale x 2 x float> @test_vloxseg7_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8961,12 +8854,12 @@ define <vscale x 2 x float> @test_vloxseg7_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8997,13 +8890,13 @@ define <vscale x 2 x float> @test_vloxseg8_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9034,13 +8927,13 @@ define <vscale x 2 x float> @test_vloxseg8_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9071,13 +8964,13 @@ define <vscale x 2 x float> @test_vloxseg8_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9110,7 +9003,6 @@ define <vscale x 1 x half> @test_vloxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9140,7 +9032,6 @@ define <vscale x 1 x half> @test_vloxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9170,7 +9061,6 @@ define <vscale x 1 x half> @test_vloxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9197,12 +9087,11 @@ entry:
 define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9229,12 +9118,11 @@ entry:
 define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9261,12 +9149,11 @@ entry:
 define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9294,9 +9181,9 @@ define <vscale x 1 x half> @test_vloxseg4_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9327,9 +9214,9 @@ define <vscale x 1 x half> @test_vloxseg4_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9360,9 +9247,9 @@ define <vscale x 1 x half> @test_vloxseg4_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9393,10 +9280,10 @@ define <vscale x 1 x half> @test_vloxseg5_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9427,10 +9314,10 @@ define <vscale x 1 x half> @test_vloxseg5_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9461,10 +9348,10 @@ define <vscale x 1 x half> @test_vloxseg5_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9495,11 +9382,11 @@ define <vscale x 1 x half> @test_vloxseg6_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9530,11 +9417,11 @@ define <vscale x 1 x half> @test_vloxseg6_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9565,11 +9452,11 @@ define <vscale x 1 x half> @test_vloxseg6_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9600,12 +9487,12 @@ define <vscale x 1 x half> @test_vloxseg7_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9636,12 +9523,12 @@ define <vscale x 1 x half> @test_vloxseg7_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9672,12 +9559,12 @@ define <vscale x 1 x half> @test_vloxseg7_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9708,13 +9595,13 @@ define <vscale x 1 x half> @test_vloxseg8_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9745,13 +9632,13 @@ define <vscale x 1 x half> @test_vloxseg8_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9782,13 +9669,13 @@ define <vscale x 1 x half> @test_vloxseg8_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9821,7 +9708,6 @@ define <vscale x 1 x float> @test_vloxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9851,7 +9737,6 @@ define <vscale x 1 x float> @test_vloxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9881,7 +9766,6 @@ define <vscale x 1 x float> @test_vloxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9908,12 +9792,11 @@ entry:
 define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9940,12 +9823,11 @@ entry:
 define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9972,12 +9854,11 @@ entry:
 define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -10005,9 +9886,9 @@ define <vscale x 1 x float> @test_vloxseg4_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10038,9 +9919,9 @@ define <vscale x 1 x float> @test_vloxseg4_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10071,9 +9952,9 @@ define <vscale x 1 x float> @test_vloxseg4_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10104,10 +9985,10 @@ define <vscale x 1 x float> @test_vloxseg5_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10138,10 +10019,10 @@ define <vscale x 1 x float> @test_vloxseg5_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10172,10 +10053,10 @@ define <vscale x 1 x float> @test_vloxseg5_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10206,11 +10087,11 @@ define <vscale x 1 x float> @test_vloxseg6_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10241,11 +10122,11 @@ define <vscale x 1 x float> @test_vloxseg6_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10276,11 +10157,11 @@ define <vscale x 1 x float> @test_vloxseg6_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10311,12 +10192,12 @@ define <vscale x 1 x float> @test_vloxseg7_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10347,12 +10228,12 @@ define <vscale x 1 x float> @test_vloxseg7_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10383,12 +10264,12 @@ define <vscale x 1 x float> @test_vloxseg7_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10419,13 +10300,13 @@ define <vscale x 1 x float> @test_vloxseg8_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10456,13 +10337,13 @@ define <vscale x 1 x float> @test_vloxseg8_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10493,13 +10374,13 @@ define <vscale x 1 x float> @test_vloxseg8_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10532,7 +10413,6 @@ define <vscale x 8 x half> @test_vloxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x hal
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10562,7 +10442,6 @@ define <vscale x 8 x half> @test_vloxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10592,7 +10471,6 @@ define <vscale x 8 x half> @test_vloxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x hal
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10619,12 +10497,11 @@ entry:
 define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10651,12 +10528,11 @@ entry:
 define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10684,10 +10560,9 @@ define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x hal
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10715,9 +10590,9 @@ define <vscale x 8 x half> @test_vloxseg4_mask_nxv8f16_nxv8i16(<vscale x 8 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -10748,9 +10623,9 @@ define <vscale x 8 x half> @test_vloxseg4_mask_nxv8f16_nxv8i8(<vscale x 8 x half
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -10780,13 +10655,12 @@ entry:
 define <vscale x 8 x half> @test_vloxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vloxseg4ei32.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10816,7 +10690,6 @@ define <vscale x 8 x float> @test_vloxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x fl
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10846,7 +10719,6 @@ define <vscale x 8 x float> @test_vloxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x flo
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10876,7 +10748,6 @@ define <vscale x 8 x float> @test_vloxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x fl
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10906,7 +10777,6 @@ define <vscale x 2 x double> @test_vloxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x d
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -10936,7 +10806,6 @@ define <vscale x 2 x double> @test_vloxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x do
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -10966,7 +10835,6 @@ define <vscale x 2 x double> @test_vloxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x d
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -10993,12 +10861,11 @@ entry:
 define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei32.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -11025,12 +10892,11 @@ entry:
 define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -11057,12 +10923,11 @@ entry:
 define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -11090,9 +10955,9 @@ define <vscale x 2 x double> @test_vloxseg4_mask_nxv2f64_nxv2i32(<vscale x 2 x d
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -11123,9 +10988,9 @@ define <vscale x 2 x double> @test_vloxseg4_mask_nxv2f64_nxv2i8(<vscale x 2 x do
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -11156,9 +11021,9 @@ define <vscale x 2 x double> @test_vloxseg4_mask_nxv2f64_nxv2i16(<vscale x 2 x d
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -11191,7 +11056,6 @@ define <vscale x 4 x half> @test_vloxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -11221,7 +11085,6 @@ define <vscale x 4 x half> @test_vloxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -11251,7 +11114,6 @@ define <vscale x 4 x half> @test_vloxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -11278,12 +11140,11 @@ entry:
 define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -11310,12 +11171,11 @@ entry:
 define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -11343,10 +11203,9 @@ define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -11374,9 +11233,9 @@ define <vscale x 4 x half> @test_vloxseg4_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11407,9 +11266,9 @@ define <vscale x 4 x half> @test_vloxseg4_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11439,13 +11298,12 @@ entry:
 define <vscale x 4 x half> @test_vloxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vloxseg4ei32.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -11473,10 +11331,10 @@ define <vscale x 4 x half> @test_vloxseg5_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11507,10 +11365,10 @@ define <vscale x 4 x half> @test_vloxseg5_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11541,10 +11399,10 @@ define <vscale x 4 x half> @test_vloxseg5_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -11575,11 +11433,11 @@ define <vscale x 4 x half> @test_vloxseg6_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11610,11 +11468,11 @@ define <vscale x 4 x half> @test_vloxseg6_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11645,11 +11503,11 @@ define <vscale x 4 x half> @test_vloxseg6_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -11680,12 +11538,12 @@ define <vscale x 4 x half> @test_vloxseg7_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11716,12 +11574,12 @@ define <vscale x 4 x half> @test_vloxseg7_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11752,12 +11610,12 @@ define <vscale x 4 x half> @test_vloxseg7_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -11788,13 +11646,13 @@ define <vscale x 4 x half> @test_vloxseg8_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11825,13 +11683,13 @@ define <vscale x 4 x half> @test_vloxseg8_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11862,13 +11720,13 @@ define <vscale x 4 x half> @test_vloxseg8_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -11901,7 +11759,6 @@ define <vscale x 2 x half> @test_vloxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -11931,7 +11788,6 @@ define <vscale x 2 x half> @test_vloxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -11961,7 +11817,6 @@ define <vscale x 2 x half> @test_vloxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -11988,12 +11843,11 @@ entry:
 define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -12020,12 +11874,11 @@ entry:
 define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -12052,12 +11905,11 @@ entry:
 define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -12085,9 +11937,9 @@ define <vscale x 2 x half> @test_vloxseg4_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12118,9 +11970,9 @@ define <vscale x 2 x half> @test_vloxseg4_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12151,9 +12003,9 @@ define <vscale x 2 x half> @test_vloxseg4_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12184,10 +12036,10 @@ define <vscale x 2 x half> @test_vloxseg5_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12218,10 +12070,10 @@ define <vscale x 2 x half> @test_vloxseg5_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12252,10 +12104,10 @@ define <vscale x 2 x half> @test_vloxseg5_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12286,11 +12138,11 @@ define <vscale x 2 x half> @test_vloxseg6_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12321,11 +12173,11 @@ define <vscale x 2 x half> @test_vloxseg6_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12356,11 +12208,11 @@ define <vscale x 2 x half> @test_vloxseg6_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12391,12 +12243,12 @@ define <vscale x 2 x half> @test_vloxseg7_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12427,12 +12279,12 @@ define <vscale x 2 x half> @test_vloxseg7_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12463,12 +12315,12 @@ define <vscale x 2 x half> @test_vloxseg7_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12499,13 +12351,13 @@ define <vscale x 2 x half> @test_vloxseg8_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12536,13 +12388,13 @@ define <vscale x 2 x half> @test_vloxseg8_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12573,13 +12425,13 @@ define <vscale x 2 x half> @test_vloxseg8_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12612,7 +12464,6 @@ define <vscale x 4 x float> @test_vloxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x fl
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -12642,7 +12493,6 @@ define <vscale x 4 x float> @test_vloxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x flo
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -12672,7 +12522,6 @@ define <vscale x 4 x float> @test_vloxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x fl
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -12699,12 +12548,11 @@ entry:
 define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -12731,12 +12579,11 @@ entry:
 define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -12763,12 +12610,11 @@ entry:
 define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei32.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -12796,9 +12642,9 @@ define <vscale x 4 x float> @test_vloxseg4_mask_nxv4f32_nxv4i16(<vscale x 4 x fl
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -12829,9 +12675,9 @@ define <vscale x 4 x float> @test_vloxseg4_mask_nxv4f32_nxv4i8(<vscale x 4 x flo
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -12862,9 +12708,9 @@ define <vscale x 4 x float> @test_vloxseg4_mask_nxv4f32_nxv4i32(<vscale x 4 x fl
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14

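The hunks above all show the same improvement: with subregister liveness tracking enabled, the register allocator can reason about the individual v registers that make up a segment-load tuple, so it no longer emits chained whole-register copies (each vmv now copies directly from v8), and the "# kill" markers for the otherwise-dead parts of the tuple disappear. For readers unfamiliar with the mechanism, a target opts in through the TargetSubtargetInfo::enableSubRegLiveness() hook. The following is a minimal sketch of that pattern, not the exact contents of RISCVSubtarget.cpp; the cl::opt name is an assumption for illustration only:

  // Minimal sketch (assumed option name): gate subregister liveness
  // behind a hidden flag whose default this change flips to true.
  static cl::opt<bool> EnableSubRegLiveness(
      "riscv-enable-subreg-liveness", cl::init(true), cl::Hidden,
      cl::desc("Enable subregister liveness tracking"));

  // Override the TargetSubtargetInfo hook so liveness is tracked per
  // subregister lane rather than per whole register tuple.
  bool RISCVSubtarget::enableSubRegLiveness() const {
    return EnableSubRegLiveness;
  }

When the hook returns true, live intervals carry lane masks, which is what lets the codegen above start the result tuple at v6/v7 so the returned segment is already in v8, instead of building the tuple elsewhere and copying the live segment back afterward.
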
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll
index b79a0fb709022..ad2af75acef4f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxseg-rv64.ll
@@ -24,7 +24,6 @@ define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -54,7 +53,6 @@ define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -84,7 +82,6 @@ define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vloxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -114,7 +111,6 @@ define <vscale x 4 x i32> @test_vloxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -144,7 +140,6 @@ define <vscale x 4 x i32> @test_vloxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -174,7 +169,6 @@ define <vscale x 4 x i32> @test_vloxseg2_mask_nxv4i32_nxv4i64(<vscale x 4 x i32>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -204,7 +198,6 @@ define <vscale x 4 x i32> @test_vloxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -231,12 +224,11 @@ entry:
 define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei32.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -263,12 +255,11 @@ entry:
 define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -296,10 +287,9 @@ define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i64(<vscale x 4 x i32>
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -326,12 +316,11 @@ entry:
 define <vscale x 4 x i32> @test_vloxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -359,9 +348,9 @@ define <vscale x 4 x i32> @test_vloxseg4_mask_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -392,9 +381,9 @@ define <vscale x 4 x i32> @test_vloxseg4_mask_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -424,13 +413,12 @@ entry:
 define <vscale x 4 x i32> @test_vloxseg4_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vloxseg4ei64.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vloxseg4.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -458,9 +446,9 @@ define <vscale x 4 x i32> @test_vloxseg4_mask_nxv4i32_nxv4i16(<vscale x 4 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -493,7 +481,6 @@ define <vscale x 16 x i8> @test_vloxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -523,7 +510,6 @@ define <vscale x 16 x i8> @test_vloxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -553,7 +539,6 @@ define <vscale x 16 x i8> @test_vloxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -581,10 +566,9 @@ define <vscale x 16 x i8> @test_vloxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8
 ; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -611,12 +595,11 @@ entry:
 define <vscale x 16 x i8> @test_vloxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -644,10 +627,9 @@ define <vscale x 16 x i8> @test_vloxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8
 ; CHECK-LABEL: test_vloxseg3_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -674,13 +656,12 @@ entry:
 define <vscale x 16 x i8> @test_vloxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vloxseg4ei16.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -708,9 +689,9 @@ define <vscale x 16 x i8> @test_vloxseg4_mask_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -741,11 +722,10 @@ define <vscale x 16 x i8> @test_vloxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8
 ; CHECK-LABEL: test_vloxseg4_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vloxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -775,7 +755,6 @@ define <vscale x 1 x i64> @test_vloxseg2_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -805,7 +784,6 @@ define <vscale x 1 x i64> @test_vloxseg2_mask_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -835,7 +813,6 @@ define <vscale x 1 x i64> @test_vloxseg2_mask_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -865,7 +842,6 @@ define <vscale x 1 x i64> @test_vloxseg2_mask_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -892,12 +868,11 @@ entry:
 define <vscale x 1 x i64> @test_vloxseg3_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -924,12 +899,11 @@ entry:
 define <vscale x 1 x i64> @test_vloxseg3_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -956,12 +930,11 @@ entry:
 define <vscale x 1 x i64> @test_vloxseg3_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -988,12 +961,11 @@ entry:
 define <vscale x 1 x i64> @test_vloxseg3_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vloxseg3.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1021,9 +993,9 @@ define <vscale x 1 x i64> @test_vloxseg4_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1054,9 +1026,9 @@ define <vscale x 1 x i64> @test_vloxseg4_mask_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1087,9 +1059,9 @@ define <vscale x 1 x i64> @test_vloxseg4_mask_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1120,9 +1092,9 @@ define <vscale x 1 x i64> @test_vloxseg4_mask_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1153,10 +1125,10 @@ define <vscale x 1 x i64> @test_vloxseg5_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1187,10 +1159,10 @@ define <vscale x 1 x i64> @test_vloxseg5_mask_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1221,10 +1193,10 @@ define <vscale x 1 x i64> @test_vloxseg5_mask_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1255,10 +1227,10 @@ define <vscale x 1 x i64> @test_vloxseg5_mask_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1289,11 +1261,11 @@ define <vscale x 1 x i64> @test_vloxseg6_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1324,11 +1296,11 @@ define <vscale x 1 x i64> @test_vloxseg6_mask_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1359,11 +1331,11 @@ define <vscale x 1 x i64> @test_vloxseg6_mask_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1394,11 +1366,11 @@ define <vscale x 1 x i64> @test_vloxseg6_mask_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1429,12 +1401,12 @@ define <vscale x 1 x i64> @test_vloxseg7_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1465,12 +1437,12 @@ define <vscale x 1 x i64> @test_vloxseg7_mask_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1501,12 +1473,12 @@ define <vscale x 1 x i64> @test_vloxseg7_mask_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1537,12 +1509,12 @@ define <vscale x 1 x i64> @test_vloxseg7_mask_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1573,13 +1545,13 @@ define <vscale x 1 x i64> @test_vloxseg8_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1610,13 +1582,13 @@ define <vscale x 1 x i64> @test_vloxseg8_mask_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1647,13 +1619,13 @@ define <vscale x 1 x i64> @test_vloxseg8_mask_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1684,13 +1656,13 @@ define <vscale x 1 x i64> @test_vloxseg8_mask_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1723,7 +1695,6 @@ define <vscale x 1 x i32> @test_vloxseg2_mask_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1753,7 +1724,6 @@ define <vscale x 1 x i32> @test_vloxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1783,7 +1753,6 @@ define <vscale x 1 x i32> @test_vloxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1813,7 +1782,6 @@ define <vscale x 1 x i32> @test_vloxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1840,12 +1808,11 @@ entry:
 define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1872,12 +1839,11 @@ entry:
 define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1904,12 +1870,11 @@ entry:
 define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1936,12 +1901,11 @@ entry:
 define <vscale x 1 x i32> @test_vloxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vloxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1969,9 +1933,9 @@ define <vscale x 1 x i32> @test_vloxseg4_mask_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2002,9 +1966,9 @@ define <vscale x 1 x i32> @test_vloxseg4_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2035,9 +1999,9 @@ define <vscale x 1 x i32> @test_vloxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2068,9 +2032,9 @@ define <vscale x 1 x i32> @test_vloxseg4_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2101,10 +2065,10 @@ define <vscale x 1 x i32> @test_vloxseg5_mask_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2135,10 +2099,10 @@ define <vscale x 1 x i32> @test_vloxseg5_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2169,10 +2133,10 @@ define <vscale x 1 x i32> @test_vloxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2203,10 +2167,10 @@ define <vscale x 1 x i32> @test_vloxseg5_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2237,11 +2201,11 @@ define <vscale x 1 x i32> @test_vloxseg6_mask_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2272,11 +2236,11 @@ define <vscale x 1 x i32> @test_vloxseg6_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2307,11 +2271,11 @@ define <vscale x 1 x i32> @test_vloxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2342,11 +2306,11 @@ define <vscale x 1 x i32> @test_vloxseg6_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2377,12 +2341,12 @@ define <vscale x 1 x i32> @test_vloxseg7_mask_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2413,12 +2377,12 @@ define <vscale x 1 x i32> @test_vloxseg7_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2449,12 +2413,12 @@ define <vscale x 1 x i32> @test_vloxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2485,12 +2449,12 @@ define <vscale x 1 x i32> @test_vloxseg7_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2521,13 +2485,13 @@ define <vscale x 1 x i32> @test_vloxseg8_mask_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2558,13 +2522,13 @@ define <vscale x 1 x i32> @test_vloxseg8_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2595,13 +2559,13 @@ define <vscale x 1 x i32> @test_vloxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2632,13 +2596,13 @@ define <vscale x 1 x i32> @test_vloxseg8_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2671,7 +2635,6 @@ define <vscale x 8 x i16> @test_vloxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2701,7 +2664,6 @@ define <vscale x 8 x i16> @test_vloxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2731,7 +2693,6 @@ define <vscale x 8 x i16> @test_vloxseg2_mask_nxv8i16_nxv8i64(<vscale x 8 x i16>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2761,7 +2722,6 @@ define <vscale x 8 x i16> @test_vloxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2788,12 +2748,11 @@ entry:
 define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2820,12 +2779,11 @@ entry:
 define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2853,10 +2811,9 @@ define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i64(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2884,10 +2841,9 @@ define <vscale x 8 x i16> @test_vloxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2915,9 +2871,9 @@ define <vscale x 8 x i16> @test_vloxseg4_mask_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -2948,9 +2904,9 @@ define <vscale x 8 x i16> @test_vloxseg4_mask_nxv8i16_nxv8i8(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -2981,11 +2937,10 @@ define <vscale x 8 x i16> @test_vloxseg4_mask_nxv8i16_nxv8i64(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -3012,13 +2967,12 @@ entry:
 define <vscale x 8 x i16> @test_vloxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vloxseg4ei32.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vloxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -3048,7 +3002,6 @@ define <vscale x 4 x i8> @test_vloxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3078,7 +3031,6 @@ define <vscale x 4 x i8> @test_vloxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3108,7 +3060,6 @@ define <vscale x 4 x i8> @test_vloxseg2_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3138,7 +3089,6 @@ define <vscale x 4 x i8> @test_vloxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3166,10 +3116,9 @@ define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3196,12 +3145,11 @@ entry:
 define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3229,10 +3177,9 @@ define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3259,12 +3206,11 @@ entry:
 define <vscale x 4 x i8> @test_vloxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3291,13 +3237,12 @@ entry:
 define <vscale x 4 x i8> @test_vloxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vloxseg4ei32.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3325,9 +3270,9 @@ define <vscale x 4 x i8> @test_vloxseg4_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3358,11 +3303,10 @@ define <vscale x 4 x i8> @test_vloxseg4_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg4.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3390,9 +3334,9 @@ define <vscale x 4 x i8> @test_vloxseg4_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3423,10 +3367,10 @@ define <vscale x 4 x i8> @test_vloxseg5_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -3457,10 +3401,10 @@ define <vscale x 4 x i8> @test_vloxseg5_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3491,12 +3435,11 @@ define <vscale x 4 x i8> @test_vloxseg5_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3524,10 +3467,10 @@ define <vscale x 4 x i8> @test_vloxseg5_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3558,11 +3501,11 @@ define <vscale x 4 x i8> @test_vloxseg6_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -3593,11 +3536,11 @@ define <vscale x 4 x i8> @test_vloxseg6_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3627,15 +3570,14 @@ entry:
 define <vscale x 4 x i8> @test_vloxseg6_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vloxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
+; CHECK-NEXT:    vloxseg6ei64.v v7, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vloxseg6.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3663,11 +3605,11 @@ define <vscale x 4 x i8> @test_vloxseg6_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3698,12 +3640,12 @@ define <vscale x 4 x i8> @test_vloxseg7_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -3734,12 +3676,12 @@ define <vscale x 4 x i8> @test_vloxseg7_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3770,12 +3712,12 @@ define <vscale x 4 x i8> @test_vloxseg7_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -3806,12 +3748,12 @@ define <vscale x 4 x i8> @test_vloxseg7_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3842,13 +3784,13 @@ define <vscale x 4 x i8> @test_vloxseg8_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -3879,13 +3821,13 @@ define <vscale x 4 x i8> @test_vloxseg8_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3916,13 +3858,13 @@ define <vscale x 4 x i8> @test_vloxseg8_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -3953,13 +3895,13 @@ define <vscale x 4 x i8> @test_vloxseg8_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3992,7 +3934,6 @@ define <vscale x 1 x i16> @test_vloxseg2_mask_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4022,7 +3963,6 @@ define <vscale x 1 x i16> @test_vloxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4052,7 +3992,6 @@ define <vscale x 1 x i16> @test_vloxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4082,7 +4021,6 @@ define <vscale x 1 x i16> @test_vloxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4109,12 +4047,11 @@ entry:
 define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4141,12 +4078,11 @@ entry:
 define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4173,12 +4109,11 @@ entry:
 define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4205,12 +4140,11 @@ entry:
 define <vscale x 1 x i16> @test_vloxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vloxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4238,9 +4172,9 @@ define <vscale x 1 x i16> @test_vloxseg4_mask_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4271,9 +4205,9 @@ define <vscale x 1 x i16> @test_vloxseg4_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4304,9 +4238,9 @@ define <vscale x 1 x i16> @test_vloxseg4_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4337,9 +4271,9 @@ define <vscale x 1 x i16> @test_vloxseg4_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4370,10 +4304,10 @@ define <vscale x 1 x i16> @test_vloxseg5_mask_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4404,10 +4338,10 @@ define <vscale x 1 x i16> @test_vloxseg5_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4438,10 +4372,10 @@ define <vscale x 1 x i16> @test_vloxseg5_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4472,10 +4406,10 @@ define <vscale x 1 x i16> @test_vloxseg5_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4506,11 +4440,11 @@ define <vscale x 1 x i16> @test_vloxseg6_mask_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4541,11 +4475,11 @@ define <vscale x 1 x i16> @test_vloxseg6_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4576,11 +4510,11 @@ define <vscale x 1 x i16> @test_vloxseg6_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4611,11 +4545,11 @@ define <vscale x 1 x i16> @test_vloxseg6_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4646,12 +4580,12 @@ define <vscale x 1 x i16> @test_vloxseg7_mask_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4682,12 +4616,12 @@ define <vscale x 1 x i16> @test_vloxseg7_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4718,12 +4652,12 @@ define <vscale x 1 x i16> @test_vloxseg7_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4754,12 +4688,12 @@ define <vscale x 1 x i16> @test_vloxseg7_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4790,13 +4724,13 @@ define <vscale x 1 x i16> @test_vloxseg8_mask_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4827,13 +4761,13 @@ define <vscale x 1 x i16> @test_vloxseg8_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4864,13 +4798,13 @@ define <vscale x 1 x i16> @test_vloxseg8_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4901,13 +4835,13 @@ define <vscale x 1 x i16> @test_vloxseg8_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4940,7 +4874,6 @@ define <vscale x 2 x i32> @test_vloxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -4970,7 +4903,6 @@ define <vscale x 2 x i32> @test_vloxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5000,7 +4932,6 @@ define <vscale x 2 x i32> @test_vloxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5030,7 +4961,6 @@ define <vscale x 2 x i32> @test_vloxseg2_mask_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg2.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5057,12 +4987,11 @@ entry:
 define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5089,12 +5018,11 @@ entry:
 define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5121,12 +5049,11 @@ entry:
 define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5154,10 +5081,9 @@ define <vscale x 2 x i32> @test_vloxseg3_mask_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5185,9 +5111,9 @@ define <vscale x 2 x i32> @test_vloxseg4_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5218,9 +5144,9 @@ define <vscale x 2 x i32> @test_vloxseg4_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5251,9 +5177,9 @@ define <vscale x 2 x i32> @test_vloxseg4_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5283,13 +5209,12 @@ entry:
 define <vscale x 2 x i32> @test_vloxseg4_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vloxseg4ei64.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vloxseg4.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5317,10 +5242,10 @@ define <vscale x 2 x i32> @test_vloxseg5_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5351,10 +5276,10 @@ define <vscale x 2 x i32> @test_vloxseg5_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5385,10 +5310,10 @@ define <vscale x 2 x i32> @test_vloxseg5_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5419,10 +5344,10 @@ define <vscale x 2 x i32> @test_vloxseg5_mask_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -5453,11 +5378,11 @@ define <vscale x 2 x i32> @test_vloxseg6_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5488,11 +5413,11 @@ define <vscale x 2 x i32> @test_vloxseg6_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5523,11 +5448,11 @@ define <vscale x 2 x i32> @test_vloxseg6_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5558,11 +5483,11 @@ define <vscale x 2 x i32> @test_vloxseg6_mask_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -5593,12 +5518,12 @@ define <vscale x 2 x i32> @test_vloxseg7_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5629,12 +5554,12 @@ define <vscale x 2 x i32> @test_vloxseg7_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5665,12 +5590,12 @@ define <vscale x 2 x i32> @test_vloxseg7_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5701,12 +5626,12 @@ define <vscale x 2 x i32> @test_vloxseg7_mask_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -5737,13 +5662,13 @@ define <vscale x 2 x i32> @test_vloxseg8_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5774,13 +5699,13 @@ define <vscale x 2 x i32> @test_vloxseg8_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5811,13 +5736,13 @@ define <vscale x 2 x i32> @test_vloxseg8_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5848,13 +5773,13 @@ define <vscale x 2 x i32> @test_vloxseg8_mask_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -5887,7 +5812,6 @@ define <vscale x 8 x i8> @test_vloxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -5917,7 +5841,6 @@ define <vscale x 8 x i8> @test_vloxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -5947,7 +5870,6 @@ define <vscale x 8 x i8> @test_vloxseg2_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -5977,7 +5899,6 @@ define <vscale x 8 x i8> @test_vloxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6005,10 +5926,9 @@ define <vscale x 8 x i8> @test_vloxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6035,12 +5955,11 @@ entry:
 define <vscale x 8 x i8> @test_vloxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6068,10 +5987,9 @@ define <vscale x 8 x i8> @test_vloxseg3_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6099,10 +6017,9 @@ define <vscale x 8 x i8> @test_vloxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6129,13 +6046,12 @@ entry:
 define <vscale x 8 x i8> @test_vloxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vloxseg4ei16.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6163,9 +6079,9 @@ define <vscale x 8 x i8> @test_vloxseg4_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6196,11 +6112,10 @@ define <vscale x 8 x i8> @test_vloxseg4_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6228,11 +6143,10 @@ define <vscale x 8 x i8> @test_vloxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6260,10 +6174,10 @@ define <vscale x 8 x i8> @test_vloxseg5_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -6294,10 +6208,10 @@ define <vscale x 8 x i8> @test_vloxseg5_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6328,12 +6242,11 @@ define <vscale x 8 x i8> @test_vloxseg5_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6361,12 +6274,11 @@ define <vscale x 8 x i8> @test_vloxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6394,11 +6306,11 @@ define <vscale x 8 x i8> @test_vloxseg6_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -6429,11 +6341,11 @@ define <vscale x 8 x i8> @test_vloxseg6_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6464,13 +6376,12 @@ define <vscale x 8 x i8> @test_vloxseg6_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6497,15 +6408,14 @@ entry:
 define <vscale x 8 x i8> @test_vloxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
+; CHECK-NEXT:    vloxseg6ei32.v v7, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6533,12 +6443,12 @@ define <vscale x 8 x i8> @test_vloxseg7_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -6569,12 +6479,12 @@ define <vscale x 8 x i8> @test_vloxseg7_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6605,14 +6515,13 @@ define <vscale x 8 x i8> @test_vloxseg7_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg7.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6640,12 +6549,12 @@ define <vscale x 8 x i8> @test_vloxseg7_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -6676,13 +6585,13 @@ define <vscale x 8 x i8> @test_vloxseg8_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -6713,13 +6622,13 @@ define <vscale x 8 x i8> @test_vloxseg8_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6750,15 +6659,14 @@ define <vscale x 8 x i8> @test_vloxseg8_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vloxseg8.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6786,13 +6694,13 @@ define <vscale x 8 x i8> @test_vloxseg8_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -6825,7 +6733,6 @@ define <vscale x 4 x i64> @test_vloxseg2_mask_nxv4i64_nxv4i32(<vscale x 4 x i64>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -6855,7 +6762,6 @@ define <vscale x 4 x i64> @test_vloxseg2_mask_nxv4i64_nxv4i8(<vscale x 4 x i64>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -6885,7 +6791,6 @@ define <vscale x 4 x i64> @test_vloxseg2_mask_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -6915,7 +6820,6 @@ define <vscale x 4 x i64> @test_vloxseg2_mask_nxv4i64_nxv4i16(<vscale x 4 x i64>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vloxseg2.mask.nxv4i64.nxv4i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -6945,7 +6849,6 @@ define <vscale x 4 x i16> @test_vloxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -6975,7 +6878,6 @@ define <vscale x 4 x i16> @test_vloxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7005,7 +6907,6 @@ define <vscale x 4 x i16> @test_vloxseg2_mask_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7035,7 +6936,6 @@ define <vscale x 4 x i16> @test_vloxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7063,10 +6963,9 @@ define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7093,12 +6992,11 @@ entry:
 define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7126,10 +7024,9 @@ define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7156,12 +7053,11 @@ entry:
 define <vscale x 4 x i16> @test_vloxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7188,13 +7084,12 @@ entry:
 define <vscale x 4 x i16> @test_vloxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vloxseg4ei32.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7222,9 +7117,9 @@ define <vscale x 4 x i16> @test_vloxseg4_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7255,11 +7150,10 @@ define <vscale x 4 x i16> @test_vloxseg4_mask_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg4.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7287,9 +7181,9 @@ define <vscale x 4 x i16> @test_vloxseg4_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7320,10 +7214,10 @@ define <vscale x 4 x i16> @test_vloxseg5_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -7354,10 +7248,10 @@ define <vscale x 4 x i16> @test_vloxseg5_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7388,12 +7282,11 @@ define <vscale x 4 x i16> @test_vloxseg5_mask_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7421,10 +7314,10 @@ define <vscale x 4 x i16> @test_vloxseg5_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7455,11 +7348,11 @@ define <vscale x 4 x i16> @test_vloxseg6_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -7490,11 +7383,11 @@ define <vscale x 4 x i16> @test_vloxseg6_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7524,15 +7417,14 @@ entry:
 define <vscale x 4 x i16> @test_vloxseg6_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
+; CHECK-NEXT:    vloxseg6ei64.v v7, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vloxseg6.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7560,11 +7452,11 @@ define <vscale x 4 x i16> @test_vloxseg6_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7595,12 +7487,12 @@ define <vscale x 4 x i16> @test_vloxseg7_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -7631,12 +7523,12 @@ define <vscale x 4 x i16> @test_vloxseg7_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7667,12 +7559,12 @@ define <vscale x 4 x i16> @test_vloxseg7_mask_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -7703,12 +7595,12 @@ define <vscale x 4 x i16> @test_vloxseg7_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7739,13 +7631,13 @@ define <vscale x 4 x i16> @test_vloxseg8_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -7776,13 +7668,13 @@ define <vscale x 4 x i16> @test_vloxseg8_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7813,13 +7705,13 @@ define <vscale x 4 x i16> @test_vloxseg8_mask_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -7850,13 +7742,13 @@ define <vscale x 4 x i16> @test_vloxseg8_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7889,7 +7781,6 @@ define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -7919,7 +7810,6 @@ define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -7949,7 +7839,6 @@ define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -7979,7 +7868,6 @@ define <vscale x 1 x i8> @test_vloxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -8006,12 +7894,11 @@ entry:
 define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -8038,12 +7925,11 @@ entry:
 define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -8070,12 +7956,11 @@ entry:
 define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -8102,12 +7987,11 @@ entry:
 define <vscale x 1 x i8> @test_vloxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vloxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -8135,9 +8019,9 @@ define <vscale x 1 x i8> @test_vloxseg4_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8168,9 +8052,9 @@ define <vscale x 1 x i8> @test_vloxseg4_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8201,9 +8085,9 @@ define <vscale x 1 x i8> @test_vloxseg4_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8234,9 +8118,9 @@ define <vscale x 1 x i8> @test_vloxseg4_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8267,10 +8151,10 @@ define <vscale x 1 x i8> @test_vloxseg5_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8301,10 +8185,10 @@ define <vscale x 1 x i8> @test_vloxseg5_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8335,10 +8219,10 @@ define <vscale x 1 x i8> @test_vloxseg5_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8369,10 +8253,10 @@ define <vscale x 1 x i8> @test_vloxseg5_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8403,11 +8287,11 @@ define <vscale x 1 x i8> @test_vloxseg6_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8438,11 +8322,11 @@ define <vscale x 1 x i8> @test_vloxseg6_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8473,11 +8357,11 @@ define <vscale x 1 x i8> @test_vloxseg6_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8508,11 +8392,11 @@ define <vscale x 1 x i8> @test_vloxseg6_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8543,12 +8427,12 @@ define <vscale x 1 x i8> @test_vloxseg7_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8579,12 +8463,12 @@ define <vscale x 1 x i8> @test_vloxseg7_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8615,12 +8499,12 @@ define <vscale x 1 x i8> @test_vloxseg7_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8651,12 +8535,12 @@ define <vscale x 1 x i8> @test_vloxseg7_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8687,13 +8571,13 @@ define <vscale x 1 x i8> @test_vloxseg8_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8724,13 +8608,13 @@ define <vscale x 1 x i8> @test_vloxseg8_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8761,14 +8645,14 @@ define <vscale x 1 x i8> @test_vloxseg8_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
@@ -8798,13 +8682,13 @@ define <vscale x 1 x i8> @test_vloxseg8_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8837,7 +8721,6 @@ define <vscale x 2 x i8> @test_vloxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -8867,7 +8750,6 @@ define <vscale x 2 x i8> @test_vloxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -8897,7 +8779,6 @@ define <vscale x 2 x i8> @test_vloxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -8927,7 +8808,6 @@ define <vscale x 2 x i8> @test_vloxseg2_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg2.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -8954,12 +8834,11 @@ entry:
 define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -8986,12 +8865,11 @@ entry:
 define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -9018,12 +8896,11 @@ entry:
 define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -9051,10 +8928,9 @@ define <vscale x 2 x i8> @test_vloxseg3_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -9082,9 +8958,9 @@ define <vscale x 2 x i8> @test_vloxseg4_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9115,9 +8991,9 @@ define <vscale x 2 x i8> @test_vloxseg4_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9148,9 +9024,9 @@ define <vscale x 2 x i8> @test_vloxseg4_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9180,13 +9056,12 @@ entry:
 define <vscale x 2 x i8> @test_vloxseg4_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vloxseg4ei64.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vloxseg4.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -9214,10 +9089,10 @@ define <vscale x 2 x i8> @test_vloxseg5_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9248,10 +9123,10 @@ define <vscale x 2 x i8> @test_vloxseg5_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9282,10 +9157,10 @@ define <vscale x 2 x i8> @test_vloxseg5_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9316,10 +9191,10 @@ define <vscale x 2 x i8> @test_vloxseg5_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -9350,11 +9225,11 @@ define <vscale x 2 x i8> @test_vloxseg6_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9385,11 +9260,11 @@ define <vscale x 2 x i8> @test_vloxseg6_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9420,11 +9295,11 @@ define <vscale x 2 x i8> @test_vloxseg6_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9455,11 +9330,11 @@ define <vscale x 2 x i8> @test_vloxseg6_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -9490,12 +9365,12 @@ define <vscale x 2 x i8> @test_vloxseg7_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9526,12 +9401,12 @@ define <vscale x 2 x i8> @test_vloxseg7_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9562,12 +9437,12 @@ define <vscale x 2 x i8> @test_vloxseg7_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9598,12 +9473,12 @@ define <vscale x 2 x i8> @test_vloxseg7_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -9634,13 +9509,13 @@ define <vscale x 2 x i8> @test_vloxseg8_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9671,13 +9546,13 @@ define <vscale x 2 x i8> @test_vloxseg8_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9708,13 +9583,13 @@ define <vscale x 2 x i8> @test_vloxseg8_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9745,13 +9620,13 @@ define <vscale x 2 x i8> @test_vloxseg8_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -9784,7 +9659,6 @@ define <vscale x 8 x i32> @test_vloxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -9814,7 +9688,6 @@ define <vscale x 8 x i32> @test_vloxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -9844,7 +9717,6 @@ define <vscale x 8 x i32> @test_vloxseg2_mask_nxv8i32_nxv8i64(<vscale x 8 x i32>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i64(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -9874,7 +9746,6 @@ define <vscale x 8 x i32> @test_vloxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vloxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -9904,7 +9775,6 @@ define <vscale x 32 x i8> @test_vloxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
@@ -9934,7 +9804,6 @@ define <vscale x 32 x i8> @test_vloxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vloxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
@@ -9964,7 +9833,6 @@ define <vscale x 2 x i16> @test_vloxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -9994,7 +9862,6 @@ define <vscale x 2 x i16> @test_vloxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10024,7 +9891,6 @@ define <vscale x 2 x i16> @test_vloxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10054,7 +9920,6 @@ define <vscale x 2 x i16> @test_vloxseg2_mask_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10081,12 +9946,11 @@ entry:
 define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10113,12 +9977,11 @@ entry:
 define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10145,12 +10008,11 @@ entry:
 define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10178,10 +10040,9 @@ define <vscale x 2 x i16> @test_vloxseg3_mask_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10209,9 +10070,9 @@ define <vscale x 2 x i16> @test_vloxseg4_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10242,9 +10103,9 @@ define <vscale x 2 x i16> @test_vloxseg4_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10275,9 +10136,9 @@ define <vscale x 2 x i16> @test_vloxseg4_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10307,13 +10168,12 @@ entry:
 define <vscale x 2 x i16> @test_vloxseg4_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vloxseg4ei64.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vloxseg4.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10341,10 +10201,10 @@ define <vscale x 2 x i16> @test_vloxseg5_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10375,10 +10235,10 @@ define <vscale x 2 x i16> @test_vloxseg5_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10409,10 +10269,10 @@ define <vscale x 2 x i16> @test_vloxseg5_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10443,10 +10303,10 @@ define <vscale x 2 x i16> @test_vloxseg5_mask_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -10477,11 +10337,11 @@ define <vscale x 2 x i16> @test_vloxseg6_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10512,11 +10372,11 @@ define <vscale x 2 x i16> @test_vloxseg6_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10547,11 +10407,11 @@ define <vscale x 2 x i16> @test_vloxseg6_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10582,11 +10442,11 @@ define <vscale x 2 x i16> @test_vloxseg6_mask_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -10617,12 +10477,12 @@ define <vscale x 2 x i16> @test_vloxseg7_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10653,12 +10513,12 @@ define <vscale x 2 x i16> @test_vloxseg7_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10689,12 +10549,12 @@ define <vscale x 2 x i16> @test_vloxseg7_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10725,12 +10585,12 @@ define <vscale x 2 x i16> @test_vloxseg7_mask_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -10761,13 +10621,13 @@ define <vscale x 2 x i16> @test_vloxseg8_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10798,13 +10658,13 @@ define <vscale x 2 x i16> @test_vloxseg8_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10835,13 +10695,13 @@ define <vscale x 2 x i16> @test_vloxseg8_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10872,13 +10732,13 @@ define <vscale x 2 x i16> @test_vloxseg8_mask_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -10911,7 +10771,6 @@ define <vscale x 2 x i64> @test_vloxseg2_mask_nxv2i64_nxv2i32(<vscale x 2 x i64>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10941,7 +10800,6 @@ define <vscale x 2 x i64> @test_vloxseg2_mask_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10971,7 +10829,6 @@ define <vscale x 2 x i64> @test_vloxseg2_mask_nxv2i64_nxv2i16(<vscale x 2 x i64>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -11001,7 +10858,6 @@ define <vscale x 2 x i64> @test_vloxseg2_mask_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg2.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -11028,12 +10884,11 @@ entry:
 define <vscale x 2 x i64> @test_vloxseg3_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei32.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -11060,12 +10915,11 @@ entry:
 define <vscale x 2 x i64> @test_vloxseg3_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -11092,12 +10946,11 @@ entry:
 define <vscale x 2 x i64> @test_vloxseg3_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -11124,12 +10977,11 @@ entry:
 define <vscale x 2 x i64> @test_vloxseg3_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei64.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vloxseg3.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -11157,9 +11009,9 @@ define <vscale x 2 x i64> @test_vloxseg4_mask_nxv2i64_nxv2i32(<vscale x 2 x i64>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -11190,9 +11042,9 @@ define <vscale x 2 x i64> @test_vloxseg4_mask_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -11223,9 +11075,9 @@ define <vscale x 2 x i64> @test_vloxseg4_mask_nxv2i64_nxv2i16(<vscale x 2 x i64>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -11256,9 +11108,9 @@ define <vscale x 2 x i64> @test_vloxseg4_mask_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -11291,7 +11143,6 @@ define <vscale x 16 x half> @test_vloxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -11321,7 +11172,6 @@ define <vscale x 16 x half> @test_vloxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -11351,7 +11201,6 @@ define <vscale x 16 x half> @test_vloxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vloxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -11381,7 +11230,6 @@ define <vscale x 4 x double> @test_vloxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x d
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -11411,7 +11259,6 @@ define <vscale x 4 x double> @test_vloxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x do
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -11441,7 +11288,6 @@ define <vscale x 4 x double> @test_vloxseg2_mask_nxv4f64_nxv4i64(<vscale x 4 x d
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -11471,7 +11317,6 @@ define <vscale x 4 x double> @test_vloxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x d
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vloxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -11501,7 +11346,6 @@ define <vscale x 1 x double> @test_vloxseg2_mask_nxv1f64_nxv1i64(<vscale x 1 x d
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11531,7 +11375,6 @@ define <vscale x 1 x double> @test_vloxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11561,7 +11404,6 @@ define <vscale x 1 x double> @test_vloxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11591,7 +11433,6 @@ define <vscale x 1 x double> @test_vloxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11618,12 +11459,11 @@ entry:
 define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11650,12 +11490,11 @@ entry:
 define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11682,12 +11521,11 @@ entry:
 define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11714,12 +11552,11 @@ entry:
 define <vscale x 1 x double> @test_vloxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vloxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11747,9 +11584,9 @@ define <vscale x 1 x double> @test_vloxseg4_mask_nxv1f64_nxv1i64(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11780,9 +11617,9 @@ define <vscale x 1 x double> @test_vloxseg4_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11813,9 +11650,9 @@ define <vscale x 1 x double> @test_vloxseg4_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11846,9 +11683,9 @@ define <vscale x 1 x double> @test_vloxseg4_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11879,10 +11716,10 @@ define <vscale x 1 x double> @test_vloxseg5_mask_nxv1f64_nxv1i64(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11913,10 +11750,10 @@ define <vscale x 1 x double> @test_vloxseg5_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11947,10 +11784,10 @@ define <vscale x 1 x double> @test_vloxseg5_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11981,10 +11818,10 @@ define <vscale x 1 x double> @test_vloxseg5_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12015,11 +11852,11 @@ define <vscale x 1 x double> @test_vloxseg6_mask_nxv1f64_nxv1i64(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12050,11 +11887,11 @@ define <vscale x 1 x double> @test_vloxseg6_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12085,11 +11922,11 @@ define <vscale x 1 x double> @test_vloxseg6_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12120,11 +11957,11 @@ define <vscale x 1 x double> @test_vloxseg6_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12155,12 +11992,12 @@ define <vscale x 1 x double> @test_vloxseg7_mask_nxv1f64_nxv1i64(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12191,12 +12028,12 @@ define <vscale x 1 x double> @test_vloxseg7_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12227,12 +12064,12 @@ define <vscale x 1 x double> @test_vloxseg7_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12263,12 +12100,12 @@ define <vscale x 1 x double> @test_vloxseg7_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12299,13 +12136,13 @@ define <vscale x 1 x double> @test_vloxseg8_mask_nxv1f64_nxv1i64(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12336,13 +12173,13 @@ define <vscale x 1 x double> @test_vloxseg8_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12373,13 +12210,13 @@ define <vscale x 1 x double> @test_vloxseg8_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12410,13 +12247,13 @@ define <vscale x 1 x double> @test_vloxseg8_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12449,7 +12286,6 @@ define <vscale x 2 x float> @test_vloxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12479,7 +12315,6 @@ define <vscale x 2 x float> @test_vloxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12509,7 +12344,6 @@ define <vscale x 2 x float> @test_vloxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12539,7 +12373,6 @@ define <vscale x 2 x float> @test_vloxseg2_mask_nxv2f32_nxv2i64(<vscale x 2 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg2.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12566,12 +12399,11 @@ entry:
 define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12598,12 +12430,11 @@ entry:
 define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12630,12 +12461,11 @@ entry:
 define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12663,10 +12493,9 @@ define <vscale x 2 x float> @test_vloxseg3_mask_nxv2f32_nxv2i64(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12694,9 +12523,9 @@ define <vscale x 2 x float> @test_vloxseg4_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12727,9 +12556,9 @@ define <vscale x 2 x float> @test_vloxseg4_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12760,9 +12589,9 @@ define <vscale x 2 x float> @test_vloxseg4_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12792,13 +12621,12 @@ entry:
 define <vscale x 2 x float> @test_vloxseg4_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vloxseg4ei64.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vloxseg4.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12826,10 +12654,10 @@ define <vscale x 2 x float> @test_vloxseg5_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12860,10 +12688,10 @@ define <vscale x 2 x float> @test_vloxseg5_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12894,10 +12722,10 @@ define <vscale x 2 x float> @test_vloxseg5_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12928,10 +12756,10 @@ define <vscale x 2 x float> @test_vloxseg5_mask_nxv2f32_nxv2i64(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -12962,11 +12790,11 @@ define <vscale x 2 x float> @test_vloxseg6_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12997,11 +12825,11 @@ define <vscale x 2 x float> @test_vloxseg6_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13032,11 +12860,11 @@ define <vscale x 2 x float> @test_vloxseg6_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13067,11 +12895,11 @@ define <vscale x 2 x float> @test_vloxseg6_mask_nxv2f32_nxv2i64(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -13102,12 +12930,12 @@ define <vscale x 2 x float> @test_vloxseg7_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13138,12 +12966,12 @@ define <vscale x 2 x float> @test_vloxseg7_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13174,12 +13002,12 @@ define <vscale x 2 x float> @test_vloxseg7_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13210,12 +13038,12 @@ define <vscale x 2 x float> @test_vloxseg7_mask_nxv2f32_nxv2i64(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -13246,13 +13074,13 @@ define <vscale x 2 x float> @test_vloxseg8_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13283,13 +13111,13 @@ define <vscale x 2 x float> @test_vloxseg8_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13320,13 +13148,13 @@ define <vscale x 2 x float> @test_vloxseg8_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13357,13 +13185,13 @@ define <vscale x 2 x float> @test_vloxseg8_mask_nxv2f32_nxv2i64(<vscale x 2 x fl
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -13396,7 +13224,6 @@ define <vscale x 1 x half> @test_vloxseg2_mask_nxv1f16_nxv1i64(<vscale x 1 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13426,7 +13253,6 @@ define <vscale x 1 x half> @test_vloxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13456,7 +13282,6 @@ define <vscale x 1 x half> @test_vloxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13486,7 +13311,6 @@ define <vscale x 1 x half> @test_vloxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13513,12 +13337,11 @@ entry:
 define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13545,12 +13368,11 @@ entry:
 define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13577,12 +13399,11 @@ entry:
 define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13609,12 +13430,11 @@ entry:
 define <vscale x 1 x half> @test_vloxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vloxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13642,9 +13462,9 @@ define <vscale x 1 x half> @test_vloxseg4_mask_nxv1f16_nxv1i64(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13675,9 +13495,9 @@ define <vscale x 1 x half> @test_vloxseg4_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13708,9 +13528,9 @@ define <vscale x 1 x half> @test_vloxseg4_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13741,9 +13561,9 @@ define <vscale x 1 x half> @test_vloxseg4_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13774,10 +13594,10 @@ define <vscale x 1 x half> @test_vloxseg5_mask_nxv1f16_nxv1i64(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13808,10 +13628,10 @@ define <vscale x 1 x half> @test_vloxseg5_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13842,10 +13662,10 @@ define <vscale x 1 x half> @test_vloxseg5_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13876,10 +13696,10 @@ define <vscale x 1 x half> @test_vloxseg5_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13910,11 +13730,11 @@ define <vscale x 1 x half> @test_vloxseg6_mask_nxv1f16_nxv1i64(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13945,11 +13765,11 @@ define <vscale x 1 x half> @test_vloxseg6_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13980,11 +13800,11 @@ define <vscale x 1 x half> @test_vloxseg6_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14015,11 +13835,11 @@ define <vscale x 1 x half> @test_vloxseg6_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14050,12 +13870,12 @@ define <vscale x 1 x half> @test_vloxseg7_mask_nxv1f16_nxv1i64(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14086,12 +13906,12 @@ define <vscale x 1 x half> @test_vloxseg7_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14122,12 +13942,12 @@ define <vscale x 1 x half> @test_vloxseg7_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14158,12 +13978,12 @@ define <vscale x 1 x half> @test_vloxseg7_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14194,13 +14014,13 @@ define <vscale x 1 x half> @test_vloxseg8_mask_nxv1f16_nxv1i64(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14231,13 +14051,13 @@ define <vscale x 1 x half> @test_vloxseg8_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14268,13 +14088,13 @@ define <vscale x 1 x half> @test_vloxseg8_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14305,13 +14125,13 @@ define <vscale x 1 x half> @test_vloxseg8_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14344,7 +14164,6 @@ define <vscale x 1 x float> @test_vloxseg2_mask_nxv1f32_nxv1i64(<vscale x 1 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14374,7 +14193,6 @@ define <vscale x 1 x float> @test_vloxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14404,7 +14222,6 @@ define <vscale x 1 x float> @test_vloxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14434,7 +14251,6 @@ define <vscale x 1 x float> @test_vloxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14461,12 +14277,11 @@ entry:
 define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14493,12 +14308,11 @@ entry:
 define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14525,12 +14339,11 @@ entry:
 define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14557,12 +14370,11 @@ entry:
 define <vscale x 1 x float> @test_vloxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vloxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14590,9 +14402,9 @@ define <vscale x 1 x float> @test_vloxseg4_mask_nxv1f32_nxv1i64(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14623,9 +14435,9 @@ define <vscale x 1 x float> @test_vloxseg4_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14656,9 +14468,9 @@ define <vscale x 1 x float> @test_vloxseg4_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14689,9 +14501,9 @@ define <vscale x 1 x float> @test_vloxseg4_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vloxseg4_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14722,10 +14534,10 @@ define <vscale x 1 x float> @test_vloxseg5_mask_nxv1f32_nxv1i64(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14756,10 +14568,10 @@ define <vscale x 1 x float> @test_vloxseg5_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14790,10 +14602,10 @@ define <vscale x 1 x float> @test_vloxseg5_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14824,10 +14636,10 @@ define <vscale x 1 x float> @test_vloxseg5_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vloxseg5_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14858,11 +14670,11 @@ define <vscale x 1 x float> @test_vloxseg6_mask_nxv1f32_nxv1i64(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14893,11 +14705,11 @@ define <vscale x 1 x float> @test_vloxseg6_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14928,11 +14740,11 @@ define <vscale x 1 x float> @test_vloxseg6_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14963,11 +14775,11 @@ define <vscale x 1 x float> @test_vloxseg6_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vloxseg6_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14998,12 +14810,12 @@ define <vscale x 1 x float> @test_vloxseg7_mask_nxv1f32_nxv1i64(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15034,12 +14846,12 @@ define <vscale x 1 x float> @test_vloxseg7_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15070,12 +14882,12 @@ define <vscale x 1 x float> @test_vloxseg7_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15106,12 +14918,12 @@ define <vscale x 1 x float> @test_vloxseg7_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vloxseg7_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15142,13 +14954,13 @@ define <vscale x 1 x float> @test_vloxseg8_mask_nxv1f32_nxv1i64(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15179,13 +14991,13 @@ define <vscale x 1 x float> @test_vloxseg8_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15216,13 +15028,13 @@ define <vscale x 1 x float> @test_vloxseg8_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15253,13 +15065,13 @@ define <vscale x 1 x float> @test_vloxseg8_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vloxseg8_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15292,7 +15104,6 @@ define <vscale x 8 x half> @test_vloxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x hal
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15322,7 +15133,6 @@ define <vscale x 8 x half> @test_vloxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15352,7 +15162,6 @@ define <vscale x 8 x half> @test_vloxseg2_mask_nxv8f16_nxv8i64(<vscale x 8 x hal
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15382,7 +15191,6 @@ define <vscale x 8 x half> @test_vloxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x hal
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15409,12 +15217,11 @@ entry:
 define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15441,12 +15248,11 @@ entry:
 define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15474,10 +15280,9 @@ define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i64(<vscale x 8 x hal
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15505,10 +15310,9 @@ define <vscale x 8 x half> @test_vloxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x hal
 ; CHECK-LABEL: test_vloxseg3_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15536,9 +15340,9 @@ define <vscale x 8 x half> @test_vloxseg4_mask_nxv8f16_nxv8i16(<vscale x 8 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -15569,9 +15373,9 @@ define <vscale x 8 x half> @test_vloxseg4_mask_nxv8f16_nxv8i8(<vscale x 8 x half
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -15602,11 +15406,10 @@ define <vscale x 8 x half> @test_vloxseg4_mask_nxv8f16_nxv8i64(<vscale x 8 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15633,13 +15436,12 @@ entry:
 define <vscale x 8 x half> @test_vloxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vloxseg4ei32.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vloxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15669,7 +15471,6 @@ define <vscale x 8 x float> @test_vloxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x fl
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15699,7 +15500,6 @@ define <vscale x 8 x float> @test_vloxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x flo
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15729,7 +15529,6 @@ define <vscale x 8 x float> @test_vloxseg2_mask_nxv8f32_nxv8i64(<vscale x 8 x fl
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i64(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15759,7 +15558,6 @@ define <vscale x 8 x float> @test_vloxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x fl
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vloxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15789,7 +15587,6 @@ define <vscale x 2 x double> @test_vloxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x d
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -15819,7 +15616,6 @@ define <vscale x 2 x double> @test_vloxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x do
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -15849,7 +15645,6 @@ define <vscale x 2 x double> @test_vloxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x d
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -15879,7 +15674,6 @@ define <vscale x 2 x double> @test_vloxseg2_mask_nxv2f64_nxv2i64(<vscale x 2 x d
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg2.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -15906,12 +15700,11 @@ entry:
 define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei32.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -15938,12 +15731,11 @@ entry:
 define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -15970,12 +15762,11 @@ entry:
 define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -16002,12 +15793,11 @@ entry:
 define <vscale x 2 x double> @test_vloxseg3_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei64.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vloxseg3.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -16035,9 +15825,9 @@ define <vscale x 2 x double> @test_vloxseg4_mask_nxv2f64_nxv2i32(<vscale x 2 x d
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -16068,9 +15858,9 @@ define <vscale x 2 x double> @test_vloxseg4_mask_nxv2f64_nxv2i8(<vscale x 2 x do
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -16101,9 +15891,9 @@ define <vscale x 2 x double> @test_vloxseg4_mask_nxv2f64_nxv2i16(<vscale x 2 x d
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -16134,9 +15924,9 @@ define <vscale x 2 x double> @test_vloxseg4_mask_nxv2f64_nxv2i64(<vscale x 2 x d
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -16169,7 +15959,6 @@ define <vscale x 4 x half> @test_vloxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16199,7 +15988,6 @@ define <vscale x 4 x half> @test_vloxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16229,7 +16017,6 @@ define <vscale x 4 x half> @test_vloxseg2_mask_nxv4f16_nxv4i64(<vscale x 4 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16259,7 +16046,6 @@ define <vscale x 4 x half> @test_vloxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16287,10 +16073,9 @@ define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16317,12 +16102,11 @@ entry:
 define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16350,10 +16134,9 @@ define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i64(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16380,12 +16163,11 @@ entry:
 define <vscale x 4 x half> @test_vloxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16412,13 +16194,12 @@ entry:
 define <vscale x 4 x half> @test_vloxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vloxseg4ei32.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16446,9 +16227,9 @@ define <vscale x 4 x half> @test_vloxseg4_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16479,11 +16260,10 @@ define <vscale x 4 x half> @test_vloxseg4_mask_nxv4f16_nxv4i64(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg4.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16511,9 +16291,9 @@ define <vscale x 4 x half> @test_vloxseg4_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16544,10 +16324,10 @@ define <vscale x 4 x half> @test_vloxseg5_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -16578,10 +16358,10 @@ define <vscale x 4 x half> @test_vloxseg5_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16612,12 +16392,11 @@ define <vscale x 4 x half> @test_vloxseg5_mask_nxv4f16_nxv4i64(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16645,10 +16424,10 @@ define <vscale x 4 x half> @test_vloxseg5_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16679,11 +16458,11 @@ define <vscale x 4 x half> @test_vloxseg6_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -16714,11 +16493,11 @@ define <vscale x 4 x half> @test_vloxseg6_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16748,15 +16527,14 @@ entry:
 define <vscale x 4 x half> @test_vloxseg6_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
+; CHECK-NEXT:    vloxseg6ei64.v v7, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vloxseg6.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16784,11 +16562,11 @@ define <vscale x 4 x half> @test_vloxseg6_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg6_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16819,12 +16597,12 @@ define <vscale x 4 x half> @test_vloxseg7_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -16855,12 +16633,12 @@ define <vscale x 4 x half> @test_vloxseg7_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16891,12 +16669,12 @@ define <vscale x 4 x half> @test_vloxseg7_mask_nxv4f16_nxv4i64(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -16927,12 +16705,12 @@ define <vscale x 4 x half> @test_vloxseg7_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16963,13 +16741,13 @@ define <vscale x 4 x half> @test_vloxseg8_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -17000,13 +16778,13 @@ define <vscale x 4 x half> @test_vloxseg8_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17037,13 +16815,13 @@ define <vscale x 4 x half> @test_vloxseg8_mask_nxv4f16_nxv4i64(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -17074,13 +16852,13 @@ define <vscale x 4 x half> @test_vloxseg8_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17113,7 +16891,6 @@ define <vscale x 2 x half> @test_vloxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17143,7 +16920,6 @@ define <vscale x 2 x half> @test_vloxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17173,7 +16949,6 @@ define <vscale x 2 x half> @test_vloxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17203,7 +16978,6 @@ define <vscale x 2 x half> @test_vloxseg2_mask_nxv2f16_nxv2i64(<vscale x 2 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg2.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17230,12 +17004,11 @@ entry:
 define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17262,12 +17035,11 @@ entry:
 define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17294,12 +17066,11 @@ entry:
 define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vloxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17327,10 +17098,9 @@ define <vscale x 2 x half> @test_vloxseg3_mask_nxv2f16_nxv2i64(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg3_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17358,9 +17128,9 @@ define <vscale x 2 x half> @test_vloxseg4_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17391,9 +17161,9 @@ define <vscale x 2 x half> @test_vloxseg4_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17424,9 +17194,9 @@ define <vscale x 2 x half> @test_vloxseg4_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17456,13 +17226,12 @@ entry:
 define <vscale x 2 x half> @test_vloxseg4_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vloxseg4ei64.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vloxseg4.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17490,10 +17259,10 @@ define <vscale x 2 x half> @test_vloxseg5_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17524,10 +17293,10 @@ define <vscale x 2 x half> @test_vloxseg5_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17558,10 +17327,10 @@ define <vscale x 2 x half> @test_vloxseg5_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17592,10 +17361,10 @@ define <vscale x 2 x half> @test_vloxseg5_mask_nxv2f16_nxv2i64(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg5_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -17626,11 +17395,11 @@ define <vscale x 2 x half> @test_vloxseg6_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17661,11 +17430,11 @@ define <vscale x 2 x half> @test_vloxseg6_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17696,11 +17465,11 @@ define <vscale x 2 x half> @test_vloxseg6_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17731,11 +17500,11 @@ define <vscale x 2 x half> @test_vloxseg6_mask_nxv2f16_nxv2i64(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg6_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -17766,12 +17535,12 @@ define <vscale x 2 x half> @test_vloxseg7_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17802,12 +17571,12 @@ define <vscale x 2 x half> @test_vloxseg7_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17838,12 +17607,12 @@ define <vscale x 2 x half> @test_vloxseg7_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17874,12 +17643,12 @@ define <vscale x 2 x half> @test_vloxseg7_mask_nxv2f16_nxv2i64(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg7_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -17910,13 +17679,13 @@ define <vscale x 2 x half> @test_vloxseg8_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17947,13 +17716,13 @@ define <vscale x 2 x half> @test_vloxseg8_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17984,13 +17753,13 @@ define <vscale x 2 x half> @test_vloxseg8_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -18021,13 +17790,13 @@ define <vscale x 2 x half> @test_vloxseg8_mask_nxv2f16_nxv2i64(<vscale x 2 x hal
 ; CHECK-LABEL: test_vloxseg8_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vloxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -18060,7 +17829,6 @@ define <vscale x 4 x float> @test_vloxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x fl
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18090,7 +17858,6 @@ define <vscale x 4 x float> @test_vloxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x flo
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18120,7 +17887,6 @@ define <vscale x 4 x float> @test_vloxseg2_mask_nxv4f32_nxv4i64(<vscale x 4 x fl
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18150,7 +17916,6 @@ define <vscale x 4 x float> @test_vloxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x fl
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18177,12 +17942,11 @@ entry:
 define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei32.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18209,12 +17973,11 @@ entry:
 define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18242,10 +18005,9 @@ define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i64(<vscale x 4 x fl
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg3ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18272,12 +18034,11 @@ entry:
 define <vscale x 4 x float> @test_vloxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg3_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vloxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18305,9 +18066,9 @@ define <vscale x 4 x float> @test_vloxseg4_mask_nxv4f32_nxv4i32(<vscale x 4 x fl
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -18338,9 +18099,9 @@ define <vscale x 4 x float> @test_vloxseg4_mask_nxv4f32_nxv4i8(<vscale x 4 x flo
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -18370,13 +18131,12 @@ entry:
 define <vscale x 4 x float> @test_vloxseg4_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxseg4ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vloxseg4ei64.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vloxseg4.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18404,9 +18164,9 @@ define <vscale x 4 x float> @test_vloxseg4_mask_nxv4f32_nxv4i16(<vscale x 4 x fl
 ; CHECK-LABEL: test_vloxseg4_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vloxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll
index 452b5be5c998f..98cba968170b2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv32.ll
@@ -10,7 +10,6 @@ define <vscale x 16 x i16> @test_vlseg2_nxv16i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %vl)
@@ -25,7 +24,6 @@ define <vscale x 16 x i16> @test_vlseg2_mask_nxv16i16(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %vl)
@@ -43,7 +41,6 @@ define <vscale x 1 x i8> @test_vlseg2_nxv1i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -58,7 +55,6 @@ define <vscale x 1 x i8> @test_vlseg2_mask_nxv1i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -76,7 +72,6 @@ define <vscale x 1 x i8> @test_vlseg3_nxv1i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -92,7 +87,6 @@ define <vscale x 1 x i8> @test_vlseg3_mask_nxv1i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -110,7 +104,6 @@ define <vscale x 1 x i8> @test_vlseg4_nxv1i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -127,7 +120,6 @@ define <vscale x 1 x i8> @test_vlseg4_mask_nxv1i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -145,7 +137,6 @@ define <vscale x 1 x i8> @test_vlseg5_nxv1i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -163,7 +154,6 @@ define <vscale x 1 x i8> @test_vlseg5_mask_nxv1i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -181,7 +171,6 @@ define <vscale x 1 x i8> @test_vlseg6_nxv1i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -200,7 +189,6 @@ define <vscale x 1 x i8> @test_vlseg6_mask_nxv1i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -218,7 +206,6 @@ define <vscale x 1 x i8> @test_vlseg7_nxv1i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -238,7 +225,6 @@ define <vscale x 1 x i8> @test_vlseg7_mask_nxv1i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -256,7 +242,6 @@ define <vscale x 1 x i8> @test_vlseg8_nxv1i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -277,7 +262,6 @@ define <vscale x 1 x i8> @test_vlseg8_mask_nxv1i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -295,7 +279,6 @@ define <vscale x 16 x i8> @test_vlseg2_nxv16i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vlseg2e8.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
@@ -310,7 +293,6 @@ define <vscale x 16 x i8> @test_vlseg2_mask_nxv16i8(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e8.v v6, (a0)
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlseg2e8.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
@@ -328,7 +310,6 @@ define <vscale x 16 x i8> @test_vlseg3_nxv16i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vlseg3e8.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
@@ -344,7 +325,6 @@ define <vscale x 16 x i8> @test_vlseg3_mask_nxv16i8(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlseg3e8.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
@@ -362,7 +342,6 @@ define <vscale x 16 x i8> @test_vlseg4_nxv16i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vlseg4e8.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
@@ -379,7 +358,6 @@ define <vscale x 16 x i8> @test_vlseg4_mask_nxv16i8(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlseg4e8.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
@@ -397,7 +375,6 @@ define <vscale x 2 x i32> @test_vlseg2_nxv2i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -412,7 +389,6 @@ define <vscale x 2 x i32> @test_vlseg2_mask_nxv2i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -430,7 +406,6 @@ define <vscale x 2 x i32> @test_vlseg3_nxv2i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -446,7 +421,6 @@ define <vscale x 2 x i32> @test_vlseg3_mask_nxv2i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -464,7 +438,6 @@ define <vscale x 2 x i32> @test_vlseg4_nxv2i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -481,7 +454,6 @@ define <vscale x 2 x i32> @test_vlseg4_mask_nxv2i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -499,7 +471,6 @@ define <vscale x 2 x i32> @test_vlseg5_nxv2i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -517,7 +488,6 @@ define <vscale x 2 x i32> @test_vlseg5_mask_nxv2i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -535,7 +505,6 @@ define <vscale x 2 x i32> @test_vlseg6_nxv2i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -554,7 +523,6 @@ define <vscale x 2 x i32> @test_vlseg6_mask_nxv2i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -572,7 +540,6 @@ define <vscale x 2 x i32> @test_vlseg7_nxv2i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -592,7 +559,6 @@ define <vscale x 2 x i32> @test_vlseg7_mask_nxv2i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -610,7 +576,6 @@ define <vscale x 2 x i32> @test_vlseg8_nxv2i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -631,7 +596,6 @@ define <vscale x 2 x i32> @test_vlseg8_mask_nxv2i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -649,7 +613,6 @@ define <vscale x 4 x i16> @test_vlseg2_nxv4i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -664,7 +627,6 @@ define <vscale x 4 x i16> @test_vlseg2_mask_nxv4i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -682,7 +644,6 @@ define <vscale x 4 x i16> @test_vlseg3_nxv4i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -698,7 +659,6 @@ define <vscale x 4 x i16> @test_vlseg3_mask_nxv4i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -716,7 +676,6 @@ define <vscale x 4 x i16> @test_vlseg4_nxv4i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -733,7 +692,6 @@ define <vscale x 4 x i16> @test_vlseg4_mask_nxv4i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -751,7 +709,6 @@ define <vscale x 4 x i16> @test_vlseg5_nxv4i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -769,7 +726,6 @@ define <vscale x 4 x i16> @test_vlseg5_mask_nxv4i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -787,7 +743,6 @@ define <vscale x 4 x i16> @test_vlseg6_nxv4i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -806,7 +761,6 @@ define <vscale x 4 x i16> @test_vlseg6_mask_nxv4i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -824,7 +778,6 @@ define <vscale x 4 x i16> @test_vlseg7_nxv4i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -844,7 +797,6 @@ define <vscale x 4 x i16> @test_vlseg7_mask_nxv4i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -862,7 +814,6 @@ define <vscale x 4 x i16> @test_vlseg8_nxv4i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -883,7 +834,6 @@ define <vscale x 4 x i16> @test_vlseg8_mask_nxv4i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -901,7 +851,6 @@ define <vscale x 1 x i32> @test_vlseg2_nxv1i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -916,7 +865,6 @@ define <vscale x 1 x i32> @test_vlseg2_mask_nxv1i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -934,7 +882,6 @@ define <vscale x 1 x i32> @test_vlseg3_nxv1i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -950,7 +897,6 @@ define <vscale x 1 x i32> @test_vlseg3_mask_nxv1i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -968,7 +914,6 @@ define <vscale x 1 x i32> @test_vlseg4_nxv1i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -985,7 +930,6 @@ define <vscale x 1 x i32> @test_vlseg4_mask_nxv1i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1003,7 +947,6 @@ define <vscale x 1 x i32> @test_vlseg5_nxv1i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1021,7 +964,6 @@ define <vscale x 1 x i32> @test_vlseg5_mask_nxv1i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1039,7 +981,6 @@ define <vscale x 1 x i32> @test_vlseg6_nxv1i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1058,7 +999,6 @@ define <vscale x 1 x i32> @test_vlseg6_mask_nxv1i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1076,7 +1016,6 @@ define <vscale x 1 x i32> @test_vlseg7_nxv1i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1096,7 +1035,6 @@ define <vscale x 1 x i32> @test_vlseg7_mask_nxv1i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1114,7 +1052,6 @@ define <vscale x 1 x i32> @test_vlseg8_nxv1i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1135,7 +1072,6 @@ define <vscale x 1 x i32> @test_vlseg8_mask_nxv1i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1153,7 +1089,6 @@ define <vscale x 8 x i16> @test_vlseg2_nxv8i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
@@ -1168,7 +1103,6 @@ define <vscale x 8 x i16> @test_vlseg2_mask_nxv8i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v6, (a0)
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlseg2e16.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
@@ -1186,7 +1120,6 @@ define <vscale x 8 x i16> @test_vlseg3_nxv8i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
@@ -1202,7 +1135,6 @@ define <vscale x 8 x i16> @test_vlseg3_mask_nxv8i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlseg3e16.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
@@ -1220,7 +1152,6 @@ define <vscale x 8 x i16> @test_vlseg4_nxv8i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
@@ -1237,7 +1168,6 @@ define <vscale x 8 x i16> @test_vlseg4_mask_nxv8i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlseg4e16.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
@@ -1255,7 +1185,6 @@ define <vscale x 8 x i8> @test_vlseg2_nxv8i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1270,7 +1199,6 @@ define <vscale x 8 x i8> @test_vlseg2_mask_nxv8i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1288,7 +1216,6 @@ define <vscale x 8 x i8> @test_vlseg3_nxv8i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1304,7 +1231,6 @@ define <vscale x 8 x i8> @test_vlseg3_mask_nxv8i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1322,7 +1248,6 @@ define <vscale x 8 x i8> @test_vlseg4_nxv8i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1339,7 +1264,6 @@ define <vscale x 8 x i8> @test_vlseg4_mask_nxv8i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1357,7 +1281,6 @@ define <vscale x 8 x i8> @test_vlseg5_nxv8i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1375,7 +1298,6 @@ define <vscale x 8 x i8> @test_vlseg5_mask_nxv8i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1393,7 +1315,6 @@ define <vscale x 8 x i8> @test_vlseg6_nxv8i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1412,7 +1333,6 @@ define <vscale x 8 x i8> @test_vlseg6_mask_nxv8i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1430,7 +1350,6 @@ define <vscale x 8 x i8> @test_vlseg7_nxv8i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1450,7 +1369,6 @@ define <vscale x 8 x i8> @test_vlseg7_mask_nxv8i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1468,7 +1386,6 @@ define <vscale x 8 x i8> @test_vlseg8_nxv8i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1489,7 +1406,6 @@ define <vscale x 8 x i8> @test_vlseg8_mask_nxv8i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1507,7 +1423,6 @@ define <vscale x 8 x i32> @test_vlseg2_nxv8i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i32 %vl)
@@ -1522,7 +1437,6 @@ define <vscale x 8 x i32> @test_vlseg2_mask_nxv8i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e32.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlseg2e32.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i32 %vl)
@@ -1540,7 +1454,6 @@ define <vscale x 4 x i8> @test_vlseg2_nxv4i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1555,7 +1468,6 @@ define <vscale x 4 x i8> @test_vlseg2_mask_nxv4i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1573,7 +1485,6 @@ define <vscale x 4 x i8> @test_vlseg3_nxv4i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1589,7 +1500,6 @@ define <vscale x 4 x i8> @test_vlseg3_mask_nxv4i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1607,7 +1517,6 @@ define <vscale x 4 x i8> @test_vlseg4_nxv4i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1624,7 +1533,6 @@ define <vscale x 4 x i8> @test_vlseg4_mask_nxv4i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1642,7 +1550,6 @@ define <vscale x 4 x i8> @test_vlseg5_nxv4i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1660,7 +1567,6 @@ define <vscale x 4 x i8> @test_vlseg5_mask_nxv4i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1678,7 +1584,6 @@ define <vscale x 4 x i8> @test_vlseg6_nxv4i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1697,7 +1602,6 @@ define <vscale x 4 x i8> @test_vlseg6_mask_nxv4i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1715,7 +1619,6 @@ define <vscale x 4 x i8> @test_vlseg7_nxv4i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1735,7 +1638,6 @@ define <vscale x 4 x i8> @test_vlseg7_mask_nxv4i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1753,7 +1655,6 @@ define <vscale x 4 x i8> @test_vlseg8_nxv4i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1774,7 +1675,6 @@ define <vscale x 4 x i8> @test_vlseg8_mask_nxv4i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1792,7 +1692,6 @@ define <vscale x 1 x i16> @test_vlseg2_nxv1i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -1807,7 +1706,6 @@ define <vscale x 1 x i16> @test_vlseg2_mask_nxv1i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -1825,7 +1723,6 @@ define <vscale x 1 x i16> @test_vlseg3_nxv1i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -1841,7 +1738,6 @@ define <vscale x 1 x i16> @test_vlseg3_mask_nxv1i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -1859,7 +1755,6 @@ define <vscale x 1 x i16> @test_vlseg4_nxv1i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -1876,7 +1771,6 @@ define <vscale x 1 x i16> @test_vlseg4_mask_nxv1i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -1894,7 +1788,6 @@ define <vscale x 1 x i16> @test_vlseg5_nxv1i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -1912,7 +1805,6 @@ define <vscale x 1 x i16> @test_vlseg5_mask_nxv1i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -1930,7 +1822,6 @@ define <vscale x 1 x i16> @test_vlseg6_nxv1i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -1949,7 +1840,6 @@ define <vscale x 1 x i16> @test_vlseg6_mask_nxv1i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -1967,7 +1857,6 @@ define <vscale x 1 x i16> @test_vlseg7_nxv1i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -1987,7 +1876,6 @@ define <vscale x 1 x i16> @test_vlseg7_mask_nxv1i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -2005,7 +1893,6 @@ define <vscale x 1 x i16> @test_vlseg8_nxv1i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -2026,7 +1913,6 @@ define <vscale x 1 x i16> @test_vlseg8_mask_nxv1i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -2044,7 +1930,6 @@ define <vscale x 32 x i8> @test_vlseg2_nxv32i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e8.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i32 %vl)
@@ -2059,7 +1944,6 @@ define <vscale x 32 x i8> @test_vlseg2_mask_nxv32i8(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e8.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlseg2e8.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i32 %vl)
@@ -2077,7 +1961,6 @@ define <vscale x 2 x i8> @test_vlseg2_nxv2i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2092,7 +1975,6 @@ define <vscale x 2 x i8> @test_vlseg2_mask_nxv2i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2110,7 +1992,6 @@ define <vscale x 2 x i8> @test_vlseg3_nxv2i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2126,7 +2007,6 @@ define <vscale x 2 x i8> @test_vlseg3_mask_nxv2i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2144,7 +2024,6 @@ define <vscale x 2 x i8> @test_vlseg4_nxv2i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2161,7 +2040,6 @@ define <vscale x 2 x i8> @test_vlseg4_mask_nxv2i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2179,7 +2057,6 @@ define <vscale x 2 x i8> @test_vlseg5_nxv2i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2197,7 +2074,6 @@ define <vscale x 2 x i8> @test_vlseg5_mask_nxv2i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2215,7 +2091,6 @@ define <vscale x 2 x i8> @test_vlseg6_nxv2i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2234,7 +2109,6 @@ define <vscale x 2 x i8> @test_vlseg6_mask_nxv2i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2252,7 +2126,6 @@ define <vscale x 2 x i8> @test_vlseg7_nxv2i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2272,7 +2145,6 @@ define <vscale x 2 x i8> @test_vlseg7_mask_nxv2i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2290,7 +2162,6 @@ define <vscale x 2 x i8> @test_vlseg8_nxv2i8(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2311,7 +2182,6 @@ define <vscale x 2 x i8> @test_vlseg8_mask_nxv2i8(ptr %base, i32 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2329,7 +2199,6 @@ define <vscale x 2 x i16> @test_vlseg2_nxv2i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2344,7 +2213,6 @@ define <vscale x 2 x i16> @test_vlseg2_mask_nxv2i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2362,7 +2230,6 @@ define <vscale x 2 x i16> @test_vlseg3_nxv2i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2378,7 +2245,6 @@ define <vscale x 2 x i16> @test_vlseg3_mask_nxv2i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2396,7 +2262,6 @@ define <vscale x 2 x i16> @test_vlseg4_nxv2i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2413,7 +2278,6 @@ define <vscale x 2 x i16> @test_vlseg4_mask_nxv2i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2431,7 +2295,6 @@ define <vscale x 2 x i16> @test_vlseg5_nxv2i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2449,7 +2312,6 @@ define <vscale x 2 x i16> @test_vlseg5_mask_nxv2i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2467,7 +2329,6 @@ define <vscale x 2 x i16> @test_vlseg6_nxv2i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2486,7 +2347,6 @@ define <vscale x 2 x i16> @test_vlseg6_mask_nxv2i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2504,7 +2364,6 @@ define <vscale x 2 x i16> @test_vlseg7_nxv2i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2524,7 +2383,6 @@ define <vscale x 2 x i16> @test_vlseg7_mask_nxv2i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2542,7 +2400,6 @@ define <vscale x 2 x i16> @test_vlseg8_nxv2i16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2563,7 +2420,6 @@ define <vscale x 2 x i16> @test_vlseg8_mask_nxv2i16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2581,7 +2437,6 @@ define <vscale x 4 x i32> @test_vlseg2_nxv4i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
@@ -2596,7 +2451,6 @@ define <vscale x 4 x i32> @test_vlseg2_mask_nxv4i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e32.v v6, (a0)
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlseg2e32.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
@@ -2614,7 +2468,6 @@ define <vscale x 4 x i32> @test_vlseg3_nxv4i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vlseg3e32.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
@@ -2630,7 +2483,6 @@ define <vscale x 4 x i32> @test_vlseg3_mask_nxv4i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlseg3e32.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
@@ -2648,7 +2500,6 @@ define <vscale x 4 x i32> @test_vlseg4_nxv4i32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vlseg4e32.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
@@ -2665,7 +2516,6 @@ define <vscale x 4 x i32> @test_vlseg4_mask_nxv4i32(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlseg4e32.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
@@ -2683,7 +2533,6 @@ define <vscale x 16 x half> @test_vlseg2_nxv16f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i32 %vl)
@@ -2698,7 +2547,6 @@ define <vscale x 16 x half> @test_vlseg2_mask_nxv16f16(ptr %base, i32 %vl, <vsca
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i32 %vl)
@@ -2716,7 +2564,6 @@ define <vscale x 4 x double> @test_vlseg2_nxv4f64(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e64.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i32 %vl)
@@ -2731,7 +2578,6 @@ define <vscale x 4 x double> @test_vlseg2_mask_nxv4f64(ptr %base, i32 %vl, <vsca
 ; CHECK-NEXT:    vlseg2e64.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlseg2e64.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i32 %vl)
@@ -2749,7 +2595,6 @@ define <vscale x 1 x double> @test_vlseg2_nxv1f64(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg2e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -2764,7 +2609,6 @@ define <vscale x 1 x double> @test_vlseg2_mask_nxv1f64(ptr %base, i32 %vl, <vsca
 ; CHECK-NEXT:    vlseg2e64.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -2782,7 +2626,6 @@ define <vscale x 1 x double> @test_vlseg3_nxv1f64(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg3e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -2798,7 +2641,6 @@ define <vscale x 1 x double> @test_vlseg3_mask_nxv1f64(ptr %base, i32 %vl, <vsca
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -2816,7 +2658,6 @@ define <vscale x 1 x double> @test_vlseg4_nxv1f64(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg4e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -2833,7 +2674,6 @@ define <vscale x 1 x double> @test_vlseg4_mask_nxv1f64(ptr %base, i32 %vl, <vsca
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -2851,7 +2691,6 @@ define <vscale x 1 x double> @test_vlseg5_nxv1f64(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg5e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -2869,7 +2708,6 @@ define <vscale x 1 x double> @test_vlseg5_mask_nxv1f64(ptr %base, i32 %vl, <vsca
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -2887,7 +2725,6 @@ define <vscale x 1 x double> @test_vlseg6_nxv1f64(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg6e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -2906,7 +2743,6 @@ define <vscale x 1 x double> @test_vlseg6_mask_nxv1f64(ptr %base, i32 %vl, <vsca
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -2924,7 +2760,6 @@ define <vscale x 1 x double> @test_vlseg7_nxv1f64(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg7e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -2944,7 +2779,6 @@ define <vscale x 1 x double> @test_vlseg7_mask_nxv1f64(ptr %base, i32 %vl, <vsca
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -2962,7 +2796,6 @@ define <vscale x 1 x double> @test_vlseg8_nxv1f64(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg8e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -2983,7 +2816,6 @@ define <vscale x 1 x double> @test_vlseg8_mask_nxv1f64(ptr %base, i32 %vl, <vsca
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -3001,7 +2833,6 @@ define <vscale x 2 x float> @test_vlseg2_nxv2f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3016,7 +2847,6 @@ define <vscale x 2 x float> @test_vlseg2_mask_nxv2f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3034,7 +2864,6 @@ define <vscale x 2 x float> @test_vlseg3_nxv2f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3050,7 +2879,6 @@ define <vscale x 2 x float> @test_vlseg3_mask_nxv2f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3068,7 +2896,6 @@ define <vscale x 2 x float> @test_vlseg4_nxv2f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3085,7 +2912,6 @@ define <vscale x 2 x float> @test_vlseg4_mask_nxv2f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3103,7 +2929,6 @@ define <vscale x 2 x float> @test_vlseg5_nxv2f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3121,7 +2946,6 @@ define <vscale x 2 x float> @test_vlseg5_mask_nxv2f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3139,7 +2963,6 @@ define <vscale x 2 x float> @test_vlseg6_nxv2f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3158,7 +2981,6 @@ define <vscale x 2 x float> @test_vlseg6_mask_nxv2f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3176,7 +2998,6 @@ define <vscale x 2 x float> @test_vlseg7_nxv2f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3196,7 +3017,6 @@ define <vscale x 2 x float> @test_vlseg7_mask_nxv2f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3214,7 +3034,6 @@ define <vscale x 2 x float> @test_vlseg8_nxv2f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3235,7 +3054,6 @@ define <vscale x 2 x float> @test_vlseg8_mask_nxv2f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3253,7 +3071,6 @@ define <vscale x 1 x half> @test_vlseg2_nxv1f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3268,7 +3085,6 @@ define <vscale x 1 x half> @test_vlseg2_mask_nxv1f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3286,7 +3102,6 @@ define <vscale x 1 x half> @test_vlseg3_nxv1f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3302,7 +3117,6 @@ define <vscale x 1 x half> @test_vlseg3_mask_nxv1f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3320,7 +3134,6 @@ define <vscale x 1 x half> @test_vlseg4_nxv1f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3337,7 +3150,6 @@ define <vscale x 1 x half> @test_vlseg4_mask_nxv1f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3355,7 +3167,6 @@ define <vscale x 1 x half> @test_vlseg5_nxv1f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3373,7 +3184,6 @@ define <vscale x 1 x half> @test_vlseg5_mask_nxv1f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3391,7 +3201,6 @@ define <vscale x 1 x half> @test_vlseg6_nxv1f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3410,7 +3219,6 @@ define <vscale x 1 x half> @test_vlseg6_mask_nxv1f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3428,7 +3236,6 @@ define <vscale x 1 x half> @test_vlseg7_nxv1f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3448,7 +3255,6 @@ define <vscale x 1 x half> @test_vlseg7_mask_nxv1f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3466,7 +3272,6 @@ define <vscale x 1 x half> @test_vlseg8_nxv1f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3487,7 +3292,6 @@ define <vscale x 1 x half> @test_vlseg8_mask_nxv1f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3505,7 +3309,6 @@ define <vscale x 1 x float> @test_vlseg2_nxv1f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -3520,7 +3323,6 @@ define <vscale x 1 x float> @test_vlseg2_mask_nxv1f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -3538,7 +3340,6 @@ define <vscale x 1 x float> @test_vlseg3_nxv1f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -3554,7 +3355,6 @@ define <vscale x 1 x float> @test_vlseg3_mask_nxv1f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -3572,7 +3372,6 @@ define <vscale x 1 x float> @test_vlseg4_nxv1f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -3589,7 +3388,6 @@ define <vscale x 1 x float> @test_vlseg4_mask_nxv1f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -3607,7 +3405,6 @@ define <vscale x 1 x float> @test_vlseg5_nxv1f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -3625,7 +3422,6 @@ define <vscale x 1 x float> @test_vlseg5_mask_nxv1f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -3643,7 +3439,6 @@ define <vscale x 1 x float> @test_vlseg6_nxv1f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -3662,7 +3457,6 @@ define <vscale x 1 x float> @test_vlseg6_mask_nxv1f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -3680,7 +3474,6 @@ define <vscale x 1 x float> @test_vlseg7_nxv1f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -3700,7 +3493,6 @@ define <vscale x 1 x float> @test_vlseg7_mask_nxv1f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -3718,7 +3510,6 @@ define <vscale x 1 x float> @test_vlseg8_nxv1f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -3739,7 +3530,6 @@ define <vscale x 1 x float> @test_vlseg8_mask_nxv1f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -3757,7 +3547,6 @@ define <vscale x 8 x half> @test_vlseg2_nxv8f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
@@ -3772,7 +3561,6 @@ define <vscale x 8 x half> @test_vlseg2_mask_nxv8f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v6, (a0)
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlseg2e16.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
@@ -3790,7 +3578,6 @@ define <vscale x 8 x half> @test_vlseg3_nxv8f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
@@ -3806,7 +3593,6 @@ define <vscale x 8 x half> @test_vlseg3_mask_nxv8f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlseg3e16.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
@@ -3824,7 +3610,6 @@ define <vscale x 8 x half> @test_vlseg4_nxv8f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
@@ -3841,7 +3626,6 @@ define <vscale x 8 x half> @test_vlseg4_mask_nxv8f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlseg4e16.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
@@ -3859,7 +3643,6 @@ define <vscale x 8 x float> @test_vlseg2_nxv8f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i32 %vl)
@@ -3874,7 +3657,6 @@ define <vscale x 8 x float> @test_vlseg2_mask_nxv8f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vlseg2e32.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlseg2e32.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i32 %vl)
@@ -3892,7 +3674,6 @@ define <vscale x 2 x double> @test_vlseg2_nxv2f64(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vlseg2e64.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
@@ -3907,7 +3688,6 @@ define <vscale x 2 x double> @test_vlseg2_mask_nxv2f64(ptr %base, i32 %vl, <vsca
 ; CHECK-NEXT:    vlseg2e64.v v6, (a0)
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlseg2e64.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
@@ -3925,7 +3705,6 @@ define <vscale x 2 x double> @test_vlseg3_nxv2f64(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vlseg3e64.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
@@ -3941,7 +3720,6 @@ define <vscale x 2 x double> @test_vlseg3_mask_nxv2f64(ptr %base, i32 %vl, <vsca
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlseg3e64.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
@@ -3959,7 +3737,6 @@ define <vscale x 2 x double> @test_vlseg4_nxv2f64(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vlseg4e64.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
@@ -3976,7 +3753,6 @@ define <vscale x 2 x double> @test_vlseg4_mask_nxv2f64(ptr %base, i32 %vl, <vsca
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlseg4e64.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
@@ -3994,7 +3770,6 @@ define <vscale x 4 x half> @test_vlseg2_nxv4f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4009,7 +3784,6 @@ define <vscale x 4 x half> @test_vlseg2_mask_nxv4f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4027,7 +3801,6 @@ define <vscale x 4 x half> @test_vlseg3_nxv4f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4043,7 +3816,6 @@ define <vscale x 4 x half> @test_vlseg3_mask_nxv4f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4061,7 +3833,6 @@ define <vscale x 4 x half> @test_vlseg4_nxv4f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4078,7 +3849,6 @@ define <vscale x 4 x half> @test_vlseg4_mask_nxv4f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4096,7 +3866,6 @@ define <vscale x 4 x half> @test_vlseg5_nxv4f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4114,7 +3883,6 @@ define <vscale x 4 x half> @test_vlseg5_mask_nxv4f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4132,7 +3900,6 @@ define <vscale x 4 x half> @test_vlseg6_nxv4f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4151,7 +3918,6 @@ define <vscale x 4 x half> @test_vlseg6_mask_nxv4f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4169,7 +3935,6 @@ define <vscale x 4 x half> @test_vlseg7_nxv4f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4189,7 +3954,6 @@ define <vscale x 4 x half> @test_vlseg7_mask_nxv4f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4207,7 +3971,6 @@ define <vscale x 4 x half> @test_vlseg8_nxv4f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4228,7 +3991,6 @@ define <vscale x 4 x half> @test_vlseg8_mask_nxv4f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4246,7 +4008,6 @@ define <vscale x 2 x half> @test_vlseg2_nxv2f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4261,7 +4022,6 @@ define <vscale x 2 x half> @test_vlseg2_mask_nxv2f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4279,7 +4039,6 @@ define <vscale x 2 x half> @test_vlseg3_nxv2f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4295,7 +4054,6 @@ define <vscale x 2 x half> @test_vlseg3_mask_nxv2f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4313,7 +4071,6 @@ define <vscale x 2 x half> @test_vlseg4_nxv2f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4330,7 +4087,6 @@ define <vscale x 2 x half> @test_vlseg4_mask_nxv2f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4348,7 +4104,6 @@ define <vscale x 2 x half> @test_vlseg5_nxv2f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4366,7 +4121,6 @@ define <vscale x 2 x half> @test_vlseg5_mask_nxv2f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4384,7 +4138,6 @@ define <vscale x 2 x half> @test_vlseg6_nxv2f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4403,7 +4156,6 @@ define <vscale x 2 x half> @test_vlseg6_mask_nxv2f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4421,7 +4173,6 @@ define <vscale x 2 x half> @test_vlseg7_nxv2f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4441,7 +4192,6 @@ define <vscale x 2 x half> @test_vlseg7_mask_nxv2f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4459,7 +4209,6 @@ define <vscale x 2 x half> @test_vlseg8_nxv2f16(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4480,7 +4229,6 @@ define <vscale x 2 x half> @test_vlseg8_mask_nxv2f16(ptr %base, i32 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4498,7 +4246,6 @@ define <vscale x 4 x float> @test_vlseg2_nxv4f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
@@ -4513,7 +4260,6 @@ define <vscale x 4 x float> @test_vlseg2_mask_nxv4f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vlseg2e32.v v6, (a0)
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlseg2e32.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
@@ -4531,7 +4277,6 @@ define <vscale x 4 x float> @test_vlseg3_nxv4f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vlseg3e32.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
@@ -4547,7 +4292,6 @@ define <vscale x 4 x float> @test_vlseg3_mask_nxv4f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlseg3e32.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
@@ -4565,7 +4309,6 @@ define <vscale x 4 x float> @test_vlseg4_nxv4f32(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vlseg4e32.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
@@ -4582,7 +4325,6 @@ define <vscale x 4 x float> @test_vlseg4_mask_nxv4f32(ptr %base, i32 %vl, <vscal
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlseg4e32.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll
index 12604322bbbcf..33672ce647a2f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlseg-rv64.ll
@@ -10,7 +10,6 @@ define <vscale x 16 x i16> @test_vlseg2_nxv16i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %vl)
@@ -25,7 +24,6 @@ define <vscale x 16 x i16> @test_vlseg2_mask_nxv16i16(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %vl)
@@ -43,7 +41,6 @@ define <vscale x 4 x i32> @test_vlseg2_nxv4i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
@@ -58,7 +55,6 @@ define <vscale x 4 x i32> @test_vlseg2_mask_nxv4i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e32.v v6, (a0)
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlseg2e32.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
@@ -76,7 +72,6 @@ define <vscale x 4 x i32> @test_vlseg3_nxv4i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vlseg3e32.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
@@ -92,7 +87,6 @@ define <vscale x 4 x i32> @test_vlseg3_mask_nxv4i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlseg3e32.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
@@ -110,7 +104,6 @@ define <vscale x 4 x i32> @test_vlseg4_nxv4i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vlseg4e32.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
@@ -127,7 +120,6 @@ define <vscale x 4 x i32> @test_vlseg4_mask_nxv4i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlseg4e32.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
@@ -145,7 +137,6 @@ define <vscale x 16 x i8> @test_vlseg2_nxv16i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vlseg2e8.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
@@ -160,7 +151,6 @@ define <vscale x 16 x i8> @test_vlseg2_mask_nxv16i8(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e8.v v6, (a0)
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlseg2e8.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
@@ -178,7 +168,6 @@ define <vscale x 16 x i8> @test_vlseg3_nxv16i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vlseg3e8.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
@@ -194,7 +183,6 @@ define <vscale x 16 x i8> @test_vlseg3_mask_nxv16i8(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlseg3e8.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
@@ -212,7 +200,6 @@ define <vscale x 16 x i8> @test_vlseg4_nxv16i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vlseg4e8.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
@@ -229,7 +216,6 @@ define <vscale x 16 x i8> @test_vlseg4_mask_nxv16i8(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlseg4e8.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
@@ -247,7 +233,6 @@ define <vscale x 1 x i64> @test_vlseg2_nxv1i64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg2e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg2.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -262,7 +247,6 @@ define <vscale x 1 x i64> @test_vlseg2_mask_nxv1i64(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e64.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg2.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -280,7 +264,6 @@ define <vscale x 1 x i64> @test_vlseg3_nxv1i64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg3e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg3.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -296,7 +279,6 @@ define <vscale x 1 x i64> @test_vlseg3_mask_nxv1i64(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg3.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -314,7 +296,6 @@ define <vscale x 1 x i64> @test_vlseg4_nxv1i64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg4e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg4.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -331,7 +312,6 @@ define <vscale x 1 x i64> @test_vlseg4_mask_nxv1i64(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg4.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -349,7 +329,6 @@ define <vscale x 1 x i64> @test_vlseg5_nxv1i64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg5e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg5.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -367,7 +346,6 @@ define <vscale x 1 x i64> @test_vlseg5_mask_nxv1i64(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg5.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -385,7 +363,6 @@ define <vscale x 1 x i64> @test_vlseg6_nxv1i64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg6e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg6.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -404,7 +381,6 @@ define <vscale x 1 x i64> @test_vlseg6_mask_nxv1i64(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg6.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -422,7 +398,6 @@ define <vscale x 1 x i64> @test_vlseg7_nxv1i64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg7e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg7.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -442,7 +417,6 @@ define <vscale x 1 x i64> @test_vlseg7_mask_nxv1i64(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg7.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -460,7 +434,6 @@ define <vscale x 1 x i64> @test_vlseg8_nxv1i64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg8e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg8.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -481,7 +454,6 @@ define <vscale x 1 x i64> @test_vlseg8_mask_nxv1i64(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlseg8.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -499,7 +471,6 @@ define <vscale x 1 x i32> @test_vlseg2_nxv1i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -514,7 +485,6 @@ define <vscale x 1 x i32> @test_vlseg2_mask_nxv1i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -532,7 +502,6 @@ define <vscale x 1 x i32> @test_vlseg3_nxv1i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -548,7 +517,6 @@ define <vscale x 1 x i32> @test_vlseg3_mask_nxv1i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -566,7 +534,6 @@ define <vscale x 1 x i32> @test_vlseg4_nxv1i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -583,7 +550,6 @@ define <vscale x 1 x i32> @test_vlseg4_mask_nxv1i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -601,7 +567,6 @@ define <vscale x 1 x i32> @test_vlseg5_nxv1i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -619,7 +584,6 @@ define <vscale x 1 x i32> @test_vlseg5_mask_nxv1i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -637,7 +601,6 @@ define <vscale x 1 x i32> @test_vlseg6_nxv1i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -656,7 +619,6 @@ define <vscale x 1 x i32> @test_vlseg6_mask_nxv1i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -674,7 +636,6 @@ define <vscale x 1 x i32> @test_vlseg7_nxv1i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -694,7 +655,6 @@ define <vscale x 1 x i32> @test_vlseg7_mask_nxv1i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -712,7 +672,6 @@ define <vscale x 1 x i32> @test_vlseg8_nxv1i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -733,7 +692,6 @@ define <vscale x 1 x i32> @test_vlseg8_mask_nxv1i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -751,7 +709,6 @@ define <vscale x 8 x i16> @test_vlseg2_nxv8i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
@@ -766,7 +723,6 @@ define <vscale x 8 x i16> @test_vlseg2_mask_nxv8i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v6, (a0)
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlseg2e16.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
@@ -784,7 +740,6 @@ define <vscale x 8 x i16> @test_vlseg3_nxv8i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
@@ -800,7 +755,6 @@ define <vscale x 8 x i16> @test_vlseg3_mask_nxv8i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlseg3e16.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
@@ -818,7 +772,6 @@ define <vscale x 8 x i16> @test_vlseg4_nxv8i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
@@ -835,7 +788,6 @@ define <vscale x 8 x i16> @test_vlseg4_mask_nxv8i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlseg4e16.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
@@ -853,7 +805,6 @@ define <vscale x 4 x i8> @test_vlseg2_nxv4i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -868,7 +819,6 @@ define <vscale x 4 x i8> @test_vlseg2_mask_nxv4i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -886,7 +836,6 @@ define <vscale x 4 x i8> @test_vlseg3_nxv4i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -902,7 +851,6 @@ define <vscale x 4 x i8> @test_vlseg3_mask_nxv4i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -920,7 +868,6 @@ define <vscale x 4 x i8> @test_vlseg4_nxv4i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -937,7 +884,6 @@ define <vscale x 4 x i8> @test_vlseg4_mask_nxv4i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -955,7 +901,6 @@ define <vscale x 4 x i8> @test_vlseg5_nxv4i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -973,7 +918,6 @@ define <vscale x 4 x i8> @test_vlseg5_mask_nxv4i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -991,7 +935,6 @@ define <vscale x 4 x i8> @test_vlseg6_nxv4i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -1010,7 +953,6 @@ define <vscale x 4 x i8> @test_vlseg6_mask_nxv4i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -1028,7 +970,6 @@ define <vscale x 4 x i8> @test_vlseg7_nxv4i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -1048,7 +989,6 @@ define <vscale x 4 x i8> @test_vlseg7_mask_nxv4i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -1066,7 +1006,6 @@ define <vscale x 4 x i8> @test_vlseg8_nxv4i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -1087,7 +1026,6 @@ define <vscale x 4 x i8> @test_vlseg8_mask_nxv4i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -1105,7 +1043,6 @@ define <vscale x 1 x i16> @test_vlseg2_nxv1i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1120,7 +1057,6 @@ define <vscale x 1 x i16> @test_vlseg2_mask_nxv1i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1138,7 +1074,6 @@ define <vscale x 1 x i16> @test_vlseg3_nxv1i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1154,7 +1089,6 @@ define <vscale x 1 x i16> @test_vlseg3_mask_nxv1i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1172,7 +1106,6 @@ define <vscale x 1 x i16> @test_vlseg4_nxv1i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1189,7 +1122,6 @@ define <vscale x 1 x i16> @test_vlseg4_mask_nxv1i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1207,7 +1139,6 @@ define <vscale x 1 x i16> @test_vlseg5_nxv1i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1225,7 +1156,6 @@ define <vscale x 1 x i16> @test_vlseg5_mask_nxv1i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1243,7 +1173,6 @@ define <vscale x 1 x i16> @test_vlseg6_nxv1i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1262,7 +1191,6 @@ define <vscale x 1 x i16> @test_vlseg6_mask_nxv1i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1280,7 +1208,6 @@ define <vscale x 1 x i16> @test_vlseg7_nxv1i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1300,7 +1227,6 @@ define <vscale x 1 x i16> @test_vlseg7_mask_nxv1i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1318,7 +1244,6 @@ define <vscale x 1 x i16> @test_vlseg8_nxv1i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1339,7 +1264,6 @@ define <vscale x 1 x i16> @test_vlseg8_mask_nxv1i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1357,7 +1281,6 @@ define <vscale x 2 x i32> @test_vlseg2_nxv2i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1372,7 +1295,6 @@ define <vscale x 2 x i32> @test_vlseg2_mask_nxv2i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1390,7 +1312,6 @@ define <vscale x 2 x i32> @test_vlseg3_nxv2i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1406,7 +1327,6 @@ define <vscale x 2 x i32> @test_vlseg3_mask_nxv2i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1424,7 +1344,6 @@ define <vscale x 2 x i32> @test_vlseg4_nxv2i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1441,7 +1360,6 @@ define <vscale x 2 x i32> @test_vlseg4_mask_nxv2i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1459,7 +1377,6 @@ define <vscale x 2 x i32> @test_vlseg5_nxv2i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1477,7 +1394,6 @@ define <vscale x 2 x i32> @test_vlseg5_mask_nxv2i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1495,7 +1411,6 @@ define <vscale x 2 x i32> @test_vlseg6_nxv2i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1514,7 +1429,6 @@ define <vscale x 2 x i32> @test_vlseg6_mask_nxv2i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1532,7 +1446,6 @@ define <vscale x 2 x i32> @test_vlseg7_nxv2i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1552,7 +1465,6 @@ define <vscale x 2 x i32> @test_vlseg7_mask_nxv2i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1570,7 +1482,6 @@ define <vscale x 2 x i32> @test_vlseg8_nxv2i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1591,7 +1502,6 @@ define <vscale x 2 x i32> @test_vlseg8_mask_nxv2i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1609,7 +1519,6 @@ define <vscale x 8 x i8> @test_vlseg2_nxv8i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1624,7 +1533,6 @@ define <vscale x 8 x i8> @test_vlseg2_mask_nxv8i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1642,7 +1550,6 @@ define <vscale x 8 x i8> @test_vlseg3_nxv8i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1658,7 +1565,6 @@ define <vscale x 8 x i8> @test_vlseg3_mask_nxv8i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1676,7 +1582,6 @@ define <vscale x 8 x i8> @test_vlseg4_nxv8i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1693,7 +1598,6 @@ define <vscale x 8 x i8> @test_vlseg4_mask_nxv8i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1711,7 +1615,6 @@ define <vscale x 8 x i8> @test_vlseg5_nxv8i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1729,7 +1632,6 @@ define <vscale x 8 x i8> @test_vlseg5_mask_nxv8i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1747,7 +1649,6 @@ define <vscale x 8 x i8> @test_vlseg6_nxv8i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1766,7 +1667,6 @@ define <vscale x 8 x i8> @test_vlseg6_mask_nxv8i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1784,7 +1684,6 @@ define <vscale x 8 x i8> @test_vlseg7_nxv8i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1804,7 +1703,6 @@ define <vscale x 8 x i8> @test_vlseg7_mask_nxv8i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1822,7 +1720,6 @@ define <vscale x 8 x i8> @test_vlseg8_nxv8i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1843,7 +1740,6 @@ define <vscale x 8 x i8> @test_vlseg8_mask_nxv8i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1861,7 +1757,6 @@ define <vscale x 4 x i64> @test_vlseg2_nxv4i64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e64.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlseg2.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, i64 %vl)
@@ -1876,7 +1771,6 @@ define <vscale x 4 x i64> @test_vlseg2_mask_nxv4i64(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e64.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlseg2e64.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlseg2.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, i64 %vl)
@@ -1894,7 +1788,6 @@ define <vscale x 4 x i16> @test_vlseg2_nxv4i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -1909,7 +1802,6 @@ define <vscale x 4 x i16> @test_vlseg2_mask_nxv4i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -1927,7 +1819,6 @@ define <vscale x 4 x i16> @test_vlseg3_nxv4i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -1943,7 +1834,6 @@ define <vscale x 4 x i16> @test_vlseg3_mask_nxv4i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -1961,7 +1851,6 @@ define <vscale x 4 x i16> @test_vlseg4_nxv4i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -1978,7 +1867,6 @@ define <vscale x 4 x i16> @test_vlseg4_mask_nxv4i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -1996,7 +1884,6 @@ define <vscale x 4 x i16> @test_vlseg5_nxv4i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2014,7 +1901,6 @@ define <vscale x 4 x i16> @test_vlseg5_mask_nxv4i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2032,7 +1918,6 @@ define <vscale x 4 x i16> @test_vlseg6_nxv4i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2051,7 +1936,6 @@ define <vscale x 4 x i16> @test_vlseg6_mask_nxv4i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2069,7 +1953,6 @@ define <vscale x 4 x i16> @test_vlseg7_nxv4i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2089,7 +1972,6 @@ define <vscale x 4 x i16> @test_vlseg7_mask_nxv4i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2107,7 +1989,6 @@ define <vscale x 4 x i16> @test_vlseg8_nxv4i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2128,7 +2009,6 @@ define <vscale x 4 x i16> @test_vlseg8_mask_nxv4i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2146,7 +2026,6 @@ define <vscale x 1 x i8> @test_vlseg2_nxv1i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2161,7 +2040,6 @@ define <vscale x 1 x i8> @test_vlseg2_mask_nxv1i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2179,7 +2057,6 @@ define <vscale x 1 x i8> @test_vlseg3_nxv1i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2195,7 +2072,6 @@ define <vscale x 1 x i8> @test_vlseg3_mask_nxv1i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2213,7 +2089,6 @@ define <vscale x 1 x i8> @test_vlseg4_nxv1i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2230,7 +2105,6 @@ define <vscale x 1 x i8> @test_vlseg4_mask_nxv1i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2248,7 +2122,6 @@ define <vscale x 1 x i8> @test_vlseg5_nxv1i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2266,7 +2139,6 @@ define <vscale x 1 x i8> @test_vlseg5_mask_nxv1i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2284,7 +2156,6 @@ define <vscale x 1 x i8> @test_vlseg6_nxv1i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2303,7 +2174,6 @@ define <vscale x 1 x i8> @test_vlseg6_mask_nxv1i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2321,7 +2191,6 @@ define <vscale x 1 x i8> @test_vlseg7_nxv1i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2341,7 +2210,6 @@ define <vscale x 1 x i8> @test_vlseg7_mask_nxv1i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2359,7 +2227,6 @@ define <vscale x 1 x i8> @test_vlseg8_nxv1i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2380,7 +2247,6 @@ define <vscale x 1 x i8> @test_vlseg8_mask_nxv1i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2398,7 +2264,6 @@ define <vscale x 2 x i8> @test_vlseg2_nxv2i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2413,7 +2278,6 @@ define <vscale x 2 x i8> @test_vlseg2_mask_nxv2i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2431,7 +2295,6 @@ define <vscale x 2 x i8> @test_vlseg3_nxv2i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2447,7 +2310,6 @@ define <vscale x 2 x i8> @test_vlseg3_mask_nxv2i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2465,7 +2327,6 @@ define <vscale x 2 x i8> @test_vlseg4_nxv2i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2482,7 +2343,6 @@ define <vscale x 2 x i8> @test_vlseg4_mask_nxv2i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2500,7 +2360,6 @@ define <vscale x 2 x i8> @test_vlseg5_nxv2i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2518,7 +2377,6 @@ define <vscale x 2 x i8> @test_vlseg5_mask_nxv2i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2536,7 +2394,6 @@ define <vscale x 2 x i8> @test_vlseg6_nxv2i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2555,7 +2412,6 @@ define <vscale x 2 x i8> @test_vlseg6_mask_nxv2i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2573,7 +2429,6 @@ define <vscale x 2 x i8> @test_vlseg7_nxv2i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2593,7 +2448,6 @@ define <vscale x 2 x i8> @test_vlseg7_mask_nxv2i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2611,7 +2465,6 @@ define <vscale x 2 x i8> @test_vlseg8_nxv2i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2632,7 +2485,6 @@ define <vscale x 2 x i8> @test_vlseg8_mask_nxv2i8(ptr %base, i64 %vl, <vscale x
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e8.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2650,7 +2502,6 @@ define <vscale x 8 x i32> @test_vlseg2_nxv8i32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i64 %vl)
@@ -2665,7 +2516,6 @@ define <vscale x 8 x i32> @test_vlseg2_mask_nxv8i32(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e32.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlseg2e32.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i64 %vl)
@@ -2683,7 +2533,6 @@ define <vscale x 32 x i8> @test_vlseg2_nxv32i8(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e8.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i64 %vl)
@@ -2698,7 +2547,6 @@ define <vscale x 32 x i8> @test_vlseg2_mask_nxv32i8(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e8.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlseg2e8.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i64 %vl)
@@ -2716,7 +2564,6 @@ define <vscale x 2 x i16> @test_vlseg2_nxv2i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -2731,7 +2578,6 @@ define <vscale x 2 x i16> @test_vlseg2_mask_nxv2i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -2749,7 +2595,6 @@ define <vscale x 2 x i16> @test_vlseg3_nxv2i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -2765,7 +2610,6 @@ define <vscale x 2 x i16> @test_vlseg3_mask_nxv2i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -2783,7 +2627,6 @@ define <vscale x 2 x i16> @test_vlseg4_nxv2i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -2800,7 +2643,6 @@ define <vscale x 2 x i16> @test_vlseg4_mask_nxv2i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -2818,7 +2660,6 @@ define <vscale x 2 x i16> @test_vlseg5_nxv2i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -2836,7 +2677,6 @@ define <vscale x 2 x i16> @test_vlseg5_mask_nxv2i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -2854,7 +2694,6 @@ define <vscale x 2 x i16> @test_vlseg6_nxv2i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -2873,7 +2712,6 @@ define <vscale x 2 x i16> @test_vlseg6_mask_nxv2i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -2891,7 +2729,6 @@ define <vscale x 2 x i16> @test_vlseg7_nxv2i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -2911,7 +2748,6 @@ define <vscale x 2 x i16> @test_vlseg7_mask_nxv2i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -2929,7 +2765,6 @@ define <vscale x 2 x i16> @test_vlseg8_nxv2i16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -2950,7 +2785,6 @@ define <vscale x 2 x i16> @test_vlseg8_mask_nxv2i16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -2968,7 +2802,6 @@ define <vscale x 2 x i64> @test_vlseg2_nxv2i64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vlseg2e64.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg2.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
@@ -2983,7 +2816,6 @@ define <vscale x 2 x i64> @test_vlseg2_mask_nxv2i64(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e64.v v6, (a0)
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlseg2e64.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg2.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
@@ -3001,7 +2833,6 @@ define <vscale x 2 x i64> @test_vlseg3_nxv2i64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vlseg3e64.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg3.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
@@ -3017,7 +2848,6 @@ define <vscale x 2 x i64> @test_vlseg3_mask_nxv2i64(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlseg3e64.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg3.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
@@ -3035,7 +2865,6 @@ define <vscale x 2 x i64> @test_vlseg4_nxv2i64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vlseg4e64.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg4.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
@@ -3052,7 +2881,6 @@ define <vscale x 2 x i64> @test_vlseg4_mask_nxv2i64(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlseg4e64.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlseg4.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
@@ -3070,7 +2898,6 @@ define <vscale x 16 x half> @test_vlseg2_nxv16f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i64 %vl)
@@ -3085,7 +2912,6 @@ define <vscale x 16 x half> @test_vlseg2_mask_nxv16f16(ptr %base, i64 %vl, <vsca
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i64 %vl)
@@ -3103,7 +2929,6 @@ define <vscale x 4 x double> @test_vlseg2_nxv4f64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e64.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i64 %vl)
@@ -3118,7 +2943,6 @@ define <vscale x 4 x double> @test_vlseg2_mask_nxv4f64(ptr %base, i64 %vl, <vsca
 ; CHECK-NEXT:    vlseg2e64.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlseg2e64.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i64 %vl)
@@ -3136,7 +2960,6 @@ define <vscale x 1 x double> @test_vlseg2_nxv1f64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg2e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3151,7 +2974,6 @@ define <vscale x 1 x double> @test_vlseg2_mask_nxv1f64(ptr %base, i64 %vl, <vsca
 ; CHECK-NEXT:    vlseg2e64.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3169,7 +2991,6 @@ define <vscale x 1 x double> @test_vlseg3_nxv1f64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg3e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3185,7 +3006,6 @@ define <vscale x 1 x double> @test_vlseg3_mask_nxv1f64(ptr %base, i64 %vl, <vsca
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3203,7 +3023,6 @@ define <vscale x 1 x double> @test_vlseg4_nxv1f64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg4e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3220,7 +3039,6 @@ define <vscale x 1 x double> @test_vlseg4_mask_nxv1f64(ptr %base, i64 %vl, <vsca
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3238,7 +3056,6 @@ define <vscale x 1 x double> @test_vlseg5_nxv1f64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg5e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3256,7 +3073,6 @@ define <vscale x 1 x double> @test_vlseg5_mask_nxv1f64(ptr %base, i64 %vl, <vsca
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3274,7 +3090,6 @@ define <vscale x 1 x double> @test_vlseg6_nxv1f64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg6e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3293,7 +3108,6 @@ define <vscale x 1 x double> @test_vlseg6_mask_nxv1f64(ptr %base, i64 %vl, <vsca
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3311,7 +3125,6 @@ define <vscale x 1 x double> @test_vlseg7_nxv1f64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg7e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3331,7 +3144,6 @@ define <vscale x 1 x double> @test_vlseg7_mask_nxv1f64(ptr %base, i64 %vl, <vsca
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3349,7 +3161,6 @@ define <vscale x 1 x double> @test_vlseg8_nxv1f64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vlseg8e64.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3370,7 +3181,6 @@ define <vscale x 1 x double> @test_vlseg8_mask_nxv1f64(ptr %base, i64 %vl, <vsca
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e64.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3388,7 +3198,6 @@ define <vscale x 2 x float> @test_vlseg2_nxv2f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3403,7 +3212,6 @@ define <vscale x 2 x float> @test_vlseg2_mask_nxv2f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3421,7 +3229,6 @@ define <vscale x 2 x float> @test_vlseg3_nxv2f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3437,7 +3244,6 @@ define <vscale x 2 x float> @test_vlseg3_mask_nxv2f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3455,7 +3261,6 @@ define <vscale x 2 x float> @test_vlseg4_nxv2f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3472,7 +3277,6 @@ define <vscale x 2 x float> @test_vlseg4_mask_nxv2f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3490,7 +3294,6 @@ define <vscale x 2 x float> @test_vlseg5_nxv2f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3508,7 +3311,6 @@ define <vscale x 2 x float> @test_vlseg5_mask_nxv2f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3526,7 +3328,6 @@ define <vscale x 2 x float> @test_vlseg6_nxv2f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3545,7 +3346,6 @@ define <vscale x 2 x float> @test_vlseg6_mask_nxv2f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3563,7 +3363,6 @@ define <vscale x 2 x float> @test_vlseg7_nxv2f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3583,7 +3382,6 @@ define <vscale x 2 x float> @test_vlseg7_mask_nxv2f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3601,7 +3399,6 @@ define <vscale x 2 x float> @test_vlseg8_nxv2f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3622,7 +3419,6 @@ define <vscale x 2 x float> @test_vlseg8_mask_nxv2f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3640,7 +3436,6 @@ define <vscale x 1 x half> @test_vlseg2_nxv1f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -3655,7 +3450,6 @@ define <vscale x 1 x half> @test_vlseg2_mask_nxv1f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -3673,7 +3467,6 @@ define <vscale x 1 x half> @test_vlseg3_nxv1f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -3689,7 +3482,6 @@ define <vscale x 1 x half> @test_vlseg3_mask_nxv1f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -3707,7 +3499,6 @@ define <vscale x 1 x half> @test_vlseg4_nxv1f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -3724,7 +3515,6 @@ define <vscale x 1 x half> @test_vlseg4_mask_nxv1f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -3742,7 +3532,6 @@ define <vscale x 1 x half> @test_vlseg5_nxv1f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -3760,7 +3549,6 @@ define <vscale x 1 x half> @test_vlseg5_mask_nxv1f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -3778,7 +3566,6 @@ define <vscale x 1 x half> @test_vlseg6_nxv1f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -3797,7 +3584,6 @@ define <vscale x 1 x half> @test_vlseg6_mask_nxv1f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -3815,7 +3601,6 @@ define <vscale x 1 x half> @test_vlseg7_nxv1f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -3835,7 +3620,6 @@ define <vscale x 1 x half> @test_vlseg7_mask_nxv1f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -3853,7 +3637,6 @@ define <vscale x 1 x half> @test_vlseg8_nxv1f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -3874,7 +3657,6 @@ define <vscale x 1 x half> @test_vlseg8_mask_nxv1f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -3892,7 +3674,6 @@ define <vscale x 1 x float> @test_vlseg2_nxv1f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -3907,7 +3688,6 @@ define <vscale x 1 x float> @test_vlseg2_mask_nxv1f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -3925,7 +3705,6 @@ define <vscale x 1 x float> @test_vlseg3_nxv1f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -3941,7 +3720,6 @@ define <vscale x 1 x float> @test_vlseg3_mask_nxv1f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -3959,7 +3737,6 @@ define <vscale x 1 x float> @test_vlseg4_nxv1f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -3976,7 +3753,6 @@ define <vscale x 1 x float> @test_vlseg4_mask_nxv1f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -3994,7 +3770,6 @@ define <vscale x 1 x float> @test_vlseg5_nxv1f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4012,7 +3787,6 @@ define <vscale x 1 x float> @test_vlseg5_mask_nxv1f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4030,7 +3804,6 @@ define <vscale x 1 x float> @test_vlseg6_nxv1f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4049,7 +3822,6 @@ define <vscale x 1 x float> @test_vlseg6_mask_nxv1f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4067,7 +3839,6 @@ define <vscale x 1 x float> @test_vlseg7_nxv1f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4087,7 +3858,6 @@ define <vscale x 1 x float> @test_vlseg7_mask_nxv1f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4105,7 +3875,6 @@ define <vscale x 1 x float> @test_vlseg8_nxv1f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4126,7 +3895,6 @@ define <vscale x 1 x float> @test_vlseg8_mask_nxv1f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e32.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4144,7 +3912,6 @@ define <vscale x 8 x half> @test_vlseg2_nxv8f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
@@ -4159,7 +3926,6 @@ define <vscale x 8 x half> @test_vlseg2_mask_nxv8f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v6, (a0)
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlseg2e16.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
@@ -4177,7 +3943,6 @@ define <vscale x 8 x half> @test_vlseg3_nxv8f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
@@ -4193,7 +3958,6 @@ define <vscale x 8 x half> @test_vlseg3_mask_nxv8f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlseg3e16.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
@@ -4211,7 +3975,6 @@ define <vscale x 8 x half> @test_vlseg4_nxv8f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
@@ -4228,7 +3991,6 @@ define <vscale x 8 x half> @test_vlseg4_mask_nxv8f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlseg4e16.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
@@ -4246,7 +4008,6 @@ define <vscale x 8 x float> @test_vlseg2_nxv8f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i64 %vl)
@@ -4261,7 +4022,6 @@ define <vscale x 8 x float> @test_vlseg2_mask_nxv8f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vlseg2e32.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlseg2e32.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i64 %vl)
@@ -4279,7 +4039,6 @@ define <vscale x 2 x double> @test_vlseg2_nxv2f64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vlseg2e64.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
@@ -4294,7 +4053,6 @@ define <vscale x 2 x double> @test_vlseg2_mask_nxv2f64(ptr %base, i64 %vl, <vsca
 ; CHECK-NEXT:    vlseg2e64.v v6, (a0)
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlseg2e64.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
@@ -4312,7 +4070,6 @@ define <vscale x 2 x double> @test_vlseg3_nxv2f64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vlseg3e64.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
@@ -4328,7 +4085,6 @@ define <vscale x 2 x double> @test_vlseg3_mask_nxv2f64(ptr %base, i64 %vl, <vsca
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlseg3e64.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
@@ -4346,7 +4102,6 @@ define <vscale x 2 x double> @test_vlseg4_nxv2f64(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vlseg4e64.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
@@ -4363,7 +4118,6 @@ define <vscale x 2 x double> @test_vlseg4_mask_nxv2f64(ptr %base, i64 %vl, <vsca
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlseg4e64.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
@@ -4381,7 +4135,6 @@ define <vscale x 4 x half> @test_vlseg2_nxv4f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -4396,7 +4149,6 @@ define <vscale x 4 x half> @test_vlseg2_mask_nxv4f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -4414,7 +4166,6 @@ define <vscale x 4 x half> @test_vlseg3_nxv4f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -4430,7 +4181,6 @@ define <vscale x 4 x half> @test_vlseg3_mask_nxv4f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -4448,7 +4198,6 @@ define <vscale x 4 x half> @test_vlseg4_nxv4f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -4465,7 +4214,6 @@ define <vscale x 4 x half> @test_vlseg4_mask_nxv4f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -4483,7 +4231,6 @@ define <vscale x 4 x half> @test_vlseg5_nxv4f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -4501,7 +4248,6 @@ define <vscale x 4 x half> @test_vlseg5_mask_nxv4f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -4519,7 +4265,6 @@ define <vscale x 4 x half> @test_vlseg6_nxv4f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -4538,7 +4283,6 @@ define <vscale x 4 x half> @test_vlseg6_mask_nxv4f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -4556,7 +4300,6 @@ define <vscale x 4 x half> @test_vlseg7_nxv4f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -4576,7 +4319,6 @@ define <vscale x 4 x half> @test_vlseg7_mask_nxv4f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -4594,7 +4336,6 @@ define <vscale x 4 x half> @test_vlseg8_nxv4f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -4615,7 +4356,6 @@ define <vscale x 4 x half> @test_vlseg8_mask_nxv4f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -4633,7 +4373,6 @@ define <vscale x 2 x half> @test_vlseg2_nxv2f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -4648,7 +4387,6 @@ define <vscale x 2 x half> @test_vlseg2_mask_nxv2f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0)
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlseg2e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -4666,7 +4404,6 @@ define <vscale x 2 x half> @test_vlseg3_nxv2f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -4682,7 +4419,6 @@ define <vscale x 2 x half> @test_vlseg3_mask_nxv2f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlseg3e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -4700,7 +4436,6 @@ define <vscale x 2 x half> @test_vlseg4_nxv2f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -4717,7 +4452,6 @@ define <vscale x 2 x half> @test_vlseg4_mask_nxv2f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlseg4e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -4735,7 +4469,6 @@ define <vscale x 2 x half> @test_vlseg5_nxv2f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -4753,7 +4486,6 @@ define <vscale x 2 x half> @test_vlseg5_mask_nxv2f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlseg5e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -4771,7 +4503,6 @@ define <vscale x 2 x half> @test_vlseg6_nxv2f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -4790,7 +4521,6 @@ define <vscale x 2 x half> @test_vlseg6_mask_nxv2f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlseg6e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -4808,7 +4538,6 @@ define <vscale x 2 x half> @test_vlseg7_nxv2f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -4828,7 +4557,6 @@ define <vscale x 2 x half> @test_vlseg7_mask_nxv2f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlseg7e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -4846,7 +4574,6 @@ define <vscale x 2 x half> @test_vlseg8_nxv2f16(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -4867,7 +4594,6 @@ define <vscale x 2 x half> @test_vlseg8_mask_nxv2f16(ptr %base, i64 %vl, <vscale
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlseg8e16.v v7, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -4885,7 +4611,6 @@ define <vscale x 4 x float> @test_vlseg2_nxv4f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vlseg2e32.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
@@ -4900,7 +4625,6 @@ define <vscale x 4 x float> @test_vlseg2_mask_nxv4f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vlseg2e32.v v6, (a0)
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlseg2e32.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
@@ -4918,7 +4642,6 @@ define <vscale x 4 x float> @test_vlseg3_nxv4f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vlseg3e32.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
@@ -4934,7 +4657,6 @@ define <vscale x 4 x float> @test_vlseg3_mask_nxv4f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlseg3e32.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
@@ -4952,7 +4674,6 @@ define <vscale x 4 x float> @test_vlseg4_nxv4f32(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vlseg4e32.v v6, (a0)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
@@ -4969,7 +4690,6 @@ define <vscale x 4 x float> @test_vlseg4_mask_nxv4f32(ptr %base, i64 %vl, <vscal
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlseg4e32.v v6, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
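
One pattern runs through all of the vlseg hunks above: the removed lines of the form "# kill: def $v8 killed $v8 killed $v7_v8..." are assembly-printer comments for KILL pseudo-instructions. They were needed to keep the whole segment-load register tuple alive until its v8 lane was copied out as the return value; once liveness is tracked per subregister, only the returned lane stays live, so the marker disappears and the masked variants below copy the merge value straight from v8 instead of going through the v7 copy. A minimal sketch of the kind of test that exhibits this, not taken from this commit; the declare/call shapes mirror the tests above, and the llc invocation (roughly llc -mtriple=riscv64 -mattr=+v) is an assumption:

declare {<vscale x 1 x i8>, <vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, ptr, i64)

define <vscale x 1 x i8> @second_field(ptr %base, i64 %vl) {
entry:
  ; vlseg2e8.v writes the register tuple v7_v8; only field 1 (v8) escapes.
  %r = tail call {<vscale x 1 x i8>, <vscale x 1 x i8>} @llvm.riscv.vlseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
  %f1 = extractvalue {<vscale x 1 x i8>, <vscale x 1 x i8>} %r, 1
  ret <vscale x 1 x i8> %f1
}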

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
index 1312fc3f76149..e39d0755da5e3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32-dead.ll
@@ -23,7 +23,6 @@ entry:
 define void @test_vlseg2ff_mask_dead_value(<vscale x 16 x i16> %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
 ; CHECK-LABEL: test_vlseg2ff_mask_dead_value:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vlseg2e16ff.v v8, (a0), v0.t
@@ -42,7 +41,6 @@ define <vscale x 16 x i16> @test_vlseg2ff_dead_vl(ptr %base, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %vl)
@@ -56,7 +54,6 @@ define <vscale x 16 x i16> @test_vlseg2ff_mask_dead_vl(<vscale x 16 x i16> %val,
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -78,7 +75,6 @@ entry:
 define void @test_vlseg2ff_mask_dead_all(<vscale x 16 x i16> %val, ptr %base, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vlseg2ff_mask_dead_all:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vlseg2e16ff.v v8, (a0), v0.t

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
index 0059e9ad222ab..3ce08eae9c9ee 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv32.ll
@@ -12,7 +12,6 @@ define <vscale x 16 x i16> @test_vlseg2ff_nxv16i16(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %vl)
@@ -30,7 +29,6 @@ define <vscale x 16 x i16> @test_vlseg2ff_mask_nxv16i16(<vscale x 16 x i16> %val
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -50,7 +48,6 @@ define <vscale x 1 x i8> @test_vlseg2ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -68,7 +65,6 @@ define <vscale x 1 x i8> @test_vlseg2ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -88,7 +84,6 @@ define <vscale x 1 x i8> @test_vlseg3ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg3ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -102,12 +97,11 @@ define <vscale x 1 x i8> @test_vlseg3ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -127,7 +121,6 @@ define <vscale x 1 x i8> @test_vlseg4ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg4ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -141,13 +134,12 @@ define <vscale x 1 x i8> @test_vlseg4ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -167,7 +159,6 @@ define <vscale x 1 x i8> @test_vlseg5ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg5ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -181,14 +172,13 @@ define <vscale x 1 x i8> @test_vlseg5ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -208,7 +198,6 @@ define <vscale x 1 x i8> @test_vlseg6ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg6ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -222,15 +211,14 @@ define <vscale x 1 x i8> @test_vlseg6ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -250,7 +238,6 @@ define <vscale x 1 x i8> @test_vlseg7ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg7ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -264,16 +251,15 @@ define <vscale x 1 x i8> @test_vlseg7ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -293,7 +279,6 @@ define <vscale x 1 x i8> @test_vlseg8ff_nxv1i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg8ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %vl)
@@ -307,17 +292,16 @@ define <vscale x 1 x i8> @test_vlseg8ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -337,7 +321,6 @@ define <vscale x 16 x i8> @test_vlseg2ff_nxv16i8(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e8ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
@@ -355,7 +338,6 @@ define <vscale x 16 x i8> @test_vlseg2ff_mask_nxv16i8(<vscale x 16 x i8> %val, p
 ; CHECK-NEXT:    vlseg2e8ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -375,7 +357,6 @@ define <vscale x 16 x i8> @test_vlseg3ff_nxv16i8(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e8ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg3ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
@@ -389,12 +370,11 @@ define <vscale x 16 x i8> @test_vlseg3ff_mask_nxv16i8(<vscale x 16 x i8> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vlseg3e8ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -414,7 +394,6 @@ define <vscale x 16 x i8> @test_vlseg4ff_nxv16i8(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e8ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg4ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %vl)
@@ -428,13 +407,12 @@ define <vscale x 16 x i8> @test_vlseg4ff_mask_nxv16i8(<vscale x 16 x i8> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vlseg4e8ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -454,7 +432,6 @@ define <vscale x 2 x i32> @test_vlseg2ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -472,7 +449,6 @@ define <vscale x 2 x i32> @test_vlseg2ff_mask_nxv2i32(<vscale x 2 x i32> %val, p
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -492,7 +468,6 @@ define <vscale x 2 x i32> @test_vlseg3ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg3ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -506,12 +481,11 @@ define <vscale x 2 x i32> @test_vlseg3ff_mask_nxv2i32(<vscale x 2 x i32> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -531,7 +505,6 @@ define <vscale x 2 x i32> @test_vlseg4ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg4ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -545,13 +518,12 @@ define <vscale x 2 x i32> @test_vlseg4ff_mask_nxv2i32(<vscale x 2 x i32> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -571,7 +543,6 @@ define <vscale x 2 x i32> @test_vlseg5ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg5ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -585,14 +556,13 @@ define <vscale x 2 x i32> @test_vlseg5ff_mask_nxv2i32(<vscale x 2 x i32> %val, p
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -612,7 +582,6 @@ define <vscale x 2 x i32> @test_vlseg6ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg6ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -626,15 +595,14 @@ define <vscale x 2 x i32> @test_vlseg6ff_mask_nxv2i32(<vscale x 2 x i32> %val, p
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -654,7 +622,6 @@ define <vscale x 2 x i32> @test_vlseg7ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg7ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -668,16 +635,15 @@ define <vscale x 2 x i32> @test_vlseg7ff_mask_nxv2i32(<vscale x 2 x i32> %val, p
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -697,7 +663,6 @@ define <vscale x 2 x i32> @test_vlseg8ff_nxv2i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg8ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %vl)
@@ -711,17 +676,16 @@ define <vscale x 2 x i32> @test_vlseg8ff_mask_nxv2i32(<vscale x 2 x i32> %val, p
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -741,7 +705,6 @@ define <vscale x 4 x i16> @test_vlseg2ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -759,7 +722,6 @@ define <vscale x 4 x i16> @test_vlseg2ff_mask_nxv4i16(<vscale x 4 x i16> %val, p
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -779,7 +741,6 @@ define <vscale x 4 x i16> @test_vlseg3ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg3ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -793,12 +754,11 @@ define <vscale x 4 x i16> @test_vlseg3ff_mask_nxv4i16(<vscale x 4 x i16> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -818,7 +778,6 @@ define <vscale x 4 x i16> @test_vlseg4ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg4ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -832,13 +791,12 @@ define <vscale x 4 x i16> @test_vlseg4ff_mask_nxv4i16(<vscale x 4 x i16> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -858,7 +816,6 @@ define <vscale x 4 x i16> @test_vlseg5ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg5ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -872,14 +829,13 @@ define <vscale x 4 x i16> @test_vlseg5ff_mask_nxv4i16(<vscale x 4 x i16> %val, p
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -899,7 +855,6 @@ define <vscale x 4 x i16> @test_vlseg6ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg6ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -913,15 +868,14 @@ define <vscale x 4 x i16> @test_vlseg6ff_mask_nxv4i16(<vscale x 4 x i16> %val, p
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -941,7 +895,6 @@ define <vscale x 4 x i16> @test_vlseg7ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg7ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -955,16 +908,15 @@ define <vscale x 4 x i16> @test_vlseg7ff_mask_nxv4i16(<vscale x 4 x i16> %val, p
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -984,7 +936,6 @@ define <vscale x 4 x i16> @test_vlseg8ff_nxv4i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg8ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %vl)
@@ -998,17 +949,16 @@ define <vscale x 4 x i16> @test_vlseg8ff_mask_nxv4i16(<vscale x 4 x i16> %val, p
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1028,7 +978,6 @@ define <vscale x 1 x i32> @test_vlseg2ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1046,7 +995,6 @@ define <vscale x 1 x i32> @test_vlseg2ff_mask_nxv1i32(<vscale x 1 x i32> %val, p
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -1066,7 +1014,6 @@ define <vscale x 1 x i32> @test_vlseg3ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg3ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1080,12 +1027,11 @@ define <vscale x 1 x i32> @test_vlseg3ff_mask_nxv1i32(<vscale x 1 x i32> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -1105,7 +1051,6 @@ define <vscale x 1 x i32> @test_vlseg4ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg4ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1119,13 +1064,12 @@ define <vscale x 1 x i32> @test_vlseg4ff_mask_nxv1i32(<vscale x 1 x i32> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -1145,7 +1089,6 @@ define <vscale x 1 x i32> @test_vlseg5ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg5ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1159,14 +1102,13 @@ define <vscale x 1 x i32> @test_vlseg5ff_mask_nxv1i32(<vscale x 1 x i32> %val, p
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -1186,7 +1128,6 @@ define <vscale x 1 x i32> @test_vlseg6ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg6ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1200,15 +1141,14 @@ define <vscale x 1 x i32> @test_vlseg6ff_mask_nxv1i32(<vscale x 1 x i32> %val, p
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -1228,7 +1168,6 @@ define <vscale x 1 x i32> @test_vlseg7ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg7ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1242,16 +1181,15 @@ define <vscale x 1 x i32> @test_vlseg7ff_mask_nxv1i32(<vscale x 1 x i32> %val, p
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -1271,7 +1209,6 @@ define <vscale x 1 x i32> @test_vlseg8ff_nxv1i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg8ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %vl)
@@ -1285,17 +1222,16 @@ define <vscale x 1 x i32> @test_vlseg8ff_mask_nxv1i32(<vscale x 1 x i32> %val, p
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -1315,7 +1251,6 @@ define <vscale x 8 x i16> @test_vlseg2ff_nxv8i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e16ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
@@ -1333,7 +1268,6 @@ define <vscale x 8 x i16> @test_vlseg2ff_mask_nxv8i16(<vscale x 8 x i16> %val, p
 ; CHECK-NEXT:    vlseg2e16ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -1353,7 +1287,6 @@ define <vscale x 8 x i16> @test_vlseg3ff_nxv8i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e16ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg3ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
@@ -1367,12 +1300,11 @@ define <vscale x 8 x i16> @test_vlseg3ff_mask_nxv8i16(<vscale x 8 x i16> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -1392,7 +1324,6 @@ define <vscale x 8 x i16> @test_vlseg4ff_nxv8i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e16ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg4ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %vl)
@@ -1406,13 +1337,12 @@ define <vscale x 8 x i16> @test_vlseg4ff_mask_nxv8i16(<vscale x 8 x i16> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -1432,7 +1362,6 @@ define <vscale x 8 x i8> @test_vlseg2ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1450,7 +1379,6 @@ define <vscale x 8 x i8> @test_vlseg2ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -1470,7 +1398,6 @@ define <vscale x 8 x i8> @test_vlseg3ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg3ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1484,12 +1411,11 @@ define <vscale x 8 x i8> @test_vlseg3ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -1509,7 +1435,6 @@ define <vscale x 8 x i8> @test_vlseg4ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg4ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1523,13 +1448,12 @@ define <vscale x 8 x i8> @test_vlseg4ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -1549,7 +1473,6 @@ define <vscale x 8 x i8> @test_vlseg5ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg5ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1563,14 +1486,13 @@ define <vscale x 8 x i8> @test_vlseg5ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -1590,7 +1512,6 @@ define <vscale x 8 x i8> @test_vlseg6ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg6ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1604,15 +1525,14 @@ define <vscale x 8 x i8> @test_vlseg6ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -1632,7 +1552,6 @@ define <vscale x 8 x i8> @test_vlseg7ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg7ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1646,16 +1565,15 @@ define <vscale x 8 x i8> @test_vlseg7ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -1675,7 +1593,6 @@ define <vscale x 8 x i8> @test_vlseg8ff_nxv8i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg8ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %vl)
@@ -1689,17 +1606,16 @@ define <vscale x 8 x i8> @test_vlseg8ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -1719,7 +1635,6 @@ define <vscale x 8 x i32> @test_vlseg2ff_nxv8i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e32ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i32 %vl)
@@ -1737,7 +1652,6 @@ define <vscale x 8 x i32> @test_vlseg2ff_mask_nxv8i32(<vscale x 8 x i32> %val, p
 ; CHECK-NEXT:    vlseg2e32ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -1757,7 +1671,6 @@ define <vscale x 4 x i8> @test_vlseg2ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1775,7 +1688,6 @@ define <vscale x 4 x i8> @test_vlseg2ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1795,7 +1707,6 @@ define <vscale x 4 x i8> @test_vlseg3ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg3ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1809,12 +1720,11 @@ define <vscale x 4 x i8> @test_vlseg3ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1834,7 +1744,6 @@ define <vscale x 4 x i8> @test_vlseg4ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg4ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1848,13 +1757,12 @@ define <vscale x 4 x i8> @test_vlseg4ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1874,7 +1782,6 @@ define <vscale x 4 x i8> @test_vlseg5ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg5ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1888,14 +1795,13 @@ define <vscale x 4 x i8> @test_vlseg5ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1915,7 +1821,6 @@ define <vscale x 4 x i8> @test_vlseg6ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg6ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1929,15 +1834,14 @@ define <vscale x 4 x i8> @test_vlseg6ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1957,7 +1861,6 @@ define <vscale x 4 x i8> @test_vlseg7ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg7ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -1971,16 +1874,15 @@ define <vscale x 4 x i8> @test_vlseg7ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -2000,7 +1902,6 @@ define <vscale x 4 x i8> @test_vlseg8ff_nxv4i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg8ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %vl)
@@ -2014,17 +1915,16 @@ define <vscale x 4 x i8> @test_vlseg8ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -2044,7 +1944,6 @@ define <vscale x 1 x i16> @test_vlseg2ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -2062,7 +1961,6 @@ define <vscale x 1 x i16> @test_vlseg2ff_mask_nxv1i16(<vscale x 1 x i16> %val, p
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2082,7 +1980,6 @@ define <vscale x 1 x i16> @test_vlseg3ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg3ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -2096,12 +1993,11 @@ define <vscale x 1 x i16> @test_vlseg3ff_mask_nxv1i16(<vscale x 1 x i16> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2121,7 +2017,6 @@ define <vscale x 1 x i16> @test_vlseg4ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg4ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -2135,13 +2030,12 @@ define <vscale x 1 x i16> @test_vlseg4ff_mask_nxv1i16(<vscale x 1 x i16> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2161,7 +2055,6 @@ define <vscale x 1 x i16> @test_vlseg5ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg5ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -2175,14 +2068,13 @@ define <vscale x 1 x i16> @test_vlseg5ff_mask_nxv1i16(<vscale x 1 x i16> %val, p
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2202,7 +2094,6 @@ define <vscale x 1 x i16> @test_vlseg6ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg6ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -2216,15 +2107,14 @@ define <vscale x 1 x i16> @test_vlseg6ff_mask_nxv1i16(<vscale x 1 x i16> %val, p
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2244,7 +2134,6 @@ define <vscale x 1 x i16> @test_vlseg7ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg7ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -2258,16 +2147,15 @@ define <vscale x 1 x i16> @test_vlseg7ff_mask_nxv1i16(<vscale x 1 x i16> %val, p
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2287,7 +2175,6 @@ define <vscale x 1 x i16> @test_vlseg8ff_nxv1i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg8ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %vl)
@@ -2301,17 +2188,16 @@ define <vscale x 1 x i16> @test_vlseg8ff_mask_nxv1i16(<vscale x 1 x i16> %val, p
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2331,7 +2217,6 @@ define <vscale x 32 x i8> @test_vlseg2ff_nxv32i8(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e8ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i32 %vl)
@@ -2349,7 +2234,6 @@ define <vscale x 32 x i8> @test_vlseg2ff_mask_nxv32i8(<vscale x 32 x i8> %val, p
 ; CHECK-NEXT:    vlseg2e8ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
@@ -2369,7 +2253,6 @@ define <vscale x 2 x i8> @test_vlseg2ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2387,7 +2270,6 @@ define <vscale x 2 x i8> @test_vlseg2ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -2407,7 +2289,6 @@ define <vscale x 2 x i8> @test_vlseg3ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg3ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2421,12 +2302,11 @@ define <vscale x 2 x i8> @test_vlseg3ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -2446,7 +2326,6 @@ define <vscale x 2 x i8> @test_vlseg4ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg4ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2460,13 +2339,12 @@ define <vscale x 2 x i8> @test_vlseg4ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -2486,7 +2364,6 @@ define <vscale x 2 x i8> @test_vlseg5ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg5ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2500,14 +2377,13 @@ define <vscale x 2 x i8> @test_vlseg5ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -2527,7 +2403,6 @@ define <vscale x 2 x i8> @test_vlseg6ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg6ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2541,15 +2416,14 @@ define <vscale x 2 x i8> @test_vlseg6ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -2569,7 +2443,6 @@ define <vscale x 2 x i8> @test_vlseg7ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg7ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2583,16 +2456,15 @@ define <vscale x 2 x i8> @test_vlseg7ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -2612,7 +2484,6 @@ define <vscale x 2 x i8> @test_vlseg8ff_nxv2i8(ptr %base, i32 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg8ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %vl)
@@ -2626,17 +2497,16 @@ define <vscale x 2 x i8> @test_vlseg8ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -2656,7 +2526,6 @@ define <vscale x 2 x i16> @test_vlseg2ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg2ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2674,7 +2543,6 @@ define <vscale x 2 x i16> @test_vlseg2ff_mask_nxv2i16(<vscale x 2 x i16> %val, p
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg2ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -2694,7 +2562,6 @@ define <vscale x 2 x i16> @test_vlseg3ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg3ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2708,12 +2575,11 @@ define <vscale x 2 x i16> @test_vlseg3ff_mask_nxv2i16(<vscale x 2 x i16> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg3ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -2733,7 +2599,6 @@ define <vscale x 2 x i16> @test_vlseg4ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg4ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2747,13 +2612,12 @@ define <vscale x 2 x i16> @test_vlseg4ff_mask_nxv2i16(<vscale x 2 x i16> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg4ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -2773,7 +2637,6 @@ define <vscale x 2 x i16> @test_vlseg5ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg5ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2787,14 +2650,13 @@ define <vscale x 2 x i16> @test_vlseg5ff_mask_nxv2i16(<vscale x 2 x i16> %val, p
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg5ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -2814,7 +2676,6 @@ define <vscale x 2 x i16> @test_vlseg6ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg6ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2828,15 +2689,14 @@ define <vscale x 2 x i16> @test_vlseg6ff_mask_nxv2i16(<vscale x 2 x i16> %val, p
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg6ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -2856,7 +2716,6 @@ define <vscale x 2 x i16> @test_vlseg7ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg7ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2870,16 +2729,15 @@ define <vscale x 2 x i16> @test_vlseg7ff_mask_nxv2i16(<vscale x 2 x i16> %val, p
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg7ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -2899,7 +2757,6 @@ define <vscale x 2 x i16> @test_vlseg8ff_nxv2i16(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg8ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %vl)
@@ -2913,17 +2770,16 @@ define <vscale x 2 x i16> @test_vlseg8ff_mask_nxv2i16(<vscale x 2 x i16> %val, p
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i32} @llvm.riscv.vlseg8ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -2943,7 +2799,6 @@ define <vscale x 4 x i32> @test_vlseg2ff_nxv4i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e32ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
@@ -2961,7 +2816,6 @@ define <vscale x 4 x i32> @test_vlseg2ff_mask_nxv4i32(<vscale x 4 x i32> %val, p
 ; CHECK-NEXT:    vlseg2e32ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -2981,7 +2835,6 @@ define <vscale x 4 x i32> @test_vlseg3ff_nxv4i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e32ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg3ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
@@ -2995,12 +2848,11 @@ define <vscale x 4 x i32> @test_vlseg3ff_mask_nxv4i32(<vscale x 4 x i32> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vlseg3e32ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg3ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -3020,7 +2872,6 @@ define <vscale x 4 x i32> @test_vlseg4ff_nxv4i32(ptr %base, i32 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e32ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg4ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %vl)
@@ -3034,13 +2885,12 @@ define <vscale x 4 x i32> @test_vlseg4ff_mask_nxv4i32(<vscale x 4 x i32> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vlseg4e32ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i32} @llvm.riscv.vlseg4ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -3060,7 +2910,6 @@ define <vscale x 16 x half> @test_vlseg2ff_nxv16f16(ptr %base, i32 %vl, ptr %out
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>, i32} @llvm.riscv.vlseg2ff.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i32 %vl)
@@ -3078,7 +2927,6 @@ define <vscale x 16 x half> @test_vlseg2ff_mask_nxv16f16(<vscale x 16 x half> %v
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -3098,7 +2946,6 @@ define <vscale x 4 x double> @test_vlseg2ff_nxv4f64(ptr %base, i32 %vl, ptr %out
 ; CHECK-NEXT:    vlseg2e64ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>, i32} @llvm.riscv.vlseg2ff.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i32 %vl)
@@ -3116,7 +2963,6 @@ define <vscale x 4 x double> @test_vlseg2ff_mask_nxv4f64(<vscale x 4 x double> %
 ; CHECK-NEXT:    vlseg2e64ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -3136,7 +2982,6 @@ define <vscale x 1 x double> @test_vlseg2ff_nxv1f64(ptr %base, i32 %vl, ptr %out
 ; CHECK-NEXT:    vlseg2e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg2ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -3154,7 +2999,6 @@ define <vscale x 1 x double> @test_vlseg2ff_mask_nxv1f64(<vscale x 1 x double> %
 ; CHECK-NEXT:    vlseg2e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -3174,7 +3018,6 @@ define <vscale x 1 x double> @test_vlseg3ff_nxv1f64(ptr %base, i32 %vl, ptr %out
 ; CHECK-NEXT:    vlseg3e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg3ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -3188,12 +3031,11 @@ define <vscale x 1 x double> @test_vlseg3ff_mask_nxv1f64(<vscale x 1 x double> %
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg3e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -3213,7 +3055,6 @@ define <vscale x 1 x double> @test_vlseg4ff_nxv1f64(ptr %base, i32 %vl, ptr %out
 ; CHECK-NEXT:    vlseg4e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg4ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -3227,13 +3068,12 @@ define <vscale x 1 x double> @test_vlseg4ff_mask_nxv1f64(<vscale x 1 x double> %
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg4e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -3253,7 +3093,6 @@ define <vscale x 1 x double> @test_vlseg5ff_nxv1f64(ptr %base, i32 %vl, ptr %out
 ; CHECK-NEXT:    vlseg5e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg5ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -3267,14 +3106,13 @@ define <vscale x 1 x double> @test_vlseg5ff_mask_nxv1f64(<vscale x 1 x double> %
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg5e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -3294,7 +3132,6 @@ define <vscale x 1 x double> @test_vlseg6ff_nxv1f64(ptr %base, i32 %vl, ptr %out
 ; CHECK-NEXT:    vlseg6e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg6ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -3308,15 +3145,14 @@ define <vscale x 1 x double> @test_vlseg6ff_mask_nxv1f64(<vscale x 1 x double> %
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg6e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -3336,7 +3172,6 @@ define <vscale x 1 x double> @test_vlseg7ff_nxv1f64(ptr %base, i32 %vl, ptr %out
 ; CHECK-NEXT:    vlseg7e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg7ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -3350,16 +3185,15 @@ define <vscale x 1 x double> @test_vlseg7ff_mask_nxv1f64(<vscale x 1 x double> %
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg7e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -3379,7 +3213,6 @@ define <vscale x 1 x double> @test_vlseg8ff_nxv1f64(ptr %base, i32 %vl, ptr %out
 ; CHECK-NEXT:    vlseg8e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg8ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %vl)
@@ -3393,17 +3226,16 @@ define <vscale x 1 x double> @test_vlseg8ff_mask_nxv1f64(<vscale x 1 x double> %
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg8e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -3423,7 +3255,6 @@ define <vscale x 2 x float> @test_vlseg2ff_nxv2f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg2ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3441,7 +3272,6 @@ define <vscale x 2 x float> @test_vlseg2ff_mask_nxv2f32(<vscale x 2 x float> %va
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -3461,7 +3291,6 @@ define <vscale x 2 x float> @test_vlseg3ff_nxv2f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg3ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3475,12 +3304,11 @@ define <vscale x 2 x float> @test_vlseg3ff_mask_nxv2f32(<vscale x 2 x float> %va
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -3500,7 +3328,6 @@ define <vscale x 2 x float> @test_vlseg4ff_nxv2f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg4ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3514,13 +3341,12 @@ define <vscale x 2 x float> @test_vlseg4ff_mask_nxv2f32(<vscale x 2 x float> %va
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -3540,7 +3366,6 @@ define <vscale x 2 x float> @test_vlseg5ff_nxv2f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg5ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3554,14 +3379,13 @@ define <vscale x 2 x float> @test_vlseg5ff_mask_nxv2f32(<vscale x 2 x float> %va
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg5ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -3581,7 +3405,6 @@ define <vscale x 2 x float> @test_vlseg6ff_nxv2f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg6ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3595,15 +3418,14 @@ define <vscale x 2 x float> @test_vlseg6ff_mask_nxv2f32(<vscale x 2 x float> %va
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg6ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -3623,7 +3445,6 @@ define <vscale x 2 x float> @test_vlseg7ff_nxv2f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg7ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3637,16 +3458,15 @@ define <vscale x 2 x float> @test_vlseg7ff_mask_nxv2f32(<vscale x 2 x float> %va
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg7ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -3666,7 +3486,6 @@ define <vscale x 2 x float> @test_vlseg8ff_nxv2f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg8ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %vl)
@@ -3680,17 +3499,16 @@ define <vscale x 2 x float> @test_vlseg8ff_mask_nxv2f32(<vscale x 2 x float> %va
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i32} @llvm.riscv.vlseg8ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -3710,7 +3528,6 @@ define <vscale x 1 x half> @test_vlseg2ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg2ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3728,7 +3545,6 @@ define <vscale x 1 x half> @test_vlseg2ff_mask_nxv1f16(<vscale x 1 x half> %val,
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -3748,7 +3564,6 @@ define <vscale x 1 x half> @test_vlseg3ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg3ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3762,12 +3577,11 @@ define <vscale x 1 x half> @test_vlseg3ff_mask_nxv1f16(<vscale x 1 x half> %val,
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -3787,7 +3601,6 @@ define <vscale x 1 x half> @test_vlseg4ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg4ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3801,13 +3614,12 @@ define <vscale x 1 x half> @test_vlseg4ff_mask_nxv1f16(<vscale x 1 x half> %val,
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -3827,7 +3639,6 @@ define <vscale x 1 x half> @test_vlseg5ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg5ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3841,14 +3652,13 @@ define <vscale x 1 x half> @test_vlseg5ff_mask_nxv1f16(<vscale x 1 x half> %val,
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -3868,7 +3678,6 @@ define <vscale x 1 x half> @test_vlseg6ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg6ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3882,15 +3691,14 @@ define <vscale x 1 x half> @test_vlseg6ff_mask_nxv1f16(<vscale x 1 x half> %val,
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -3910,7 +3718,6 @@ define <vscale x 1 x half> @test_vlseg7ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg7ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3924,16 +3731,15 @@ define <vscale x 1 x half> @test_vlseg7ff_mask_nxv1f16(<vscale x 1 x half> %val,
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -3953,7 +3759,6 @@ define <vscale x 1 x half> @test_vlseg8ff_nxv1f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg8ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %vl)
@@ -3967,17 +3772,16 @@ define <vscale x 1 x half> @test_vlseg8ff_mask_nxv1f16(<vscale x 1 x half> %val,
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -3997,7 +3801,6 @@ define <vscale x 1 x float> @test_vlseg2ff_nxv1f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg2ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -4015,7 +3818,6 @@ define <vscale x 1 x float> @test_vlseg2ff_mask_nxv1f32(<vscale x 1 x float> %va
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -4035,7 +3837,6 @@ define <vscale x 1 x float> @test_vlseg3ff_nxv1f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg3ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -4049,12 +3850,11 @@ define <vscale x 1 x float> @test_vlseg3ff_mask_nxv1f32(<vscale x 1 x float> %va
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -4074,7 +3874,6 @@ define <vscale x 1 x float> @test_vlseg4ff_nxv1f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg4ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -4088,13 +3887,12 @@ define <vscale x 1 x float> @test_vlseg4ff_mask_nxv1f32(<vscale x 1 x float> %va
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -4114,7 +3912,6 @@ define <vscale x 1 x float> @test_vlseg5ff_nxv1f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg5ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -4128,14 +3925,13 @@ define <vscale x 1 x float> @test_vlseg5ff_mask_nxv1f32(<vscale x 1 x float> %va
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg5ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -4155,7 +3951,6 @@ define <vscale x 1 x float> @test_vlseg6ff_nxv1f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg6ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -4169,15 +3964,14 @@ define <vscale x 1 x float> @test_vlseg6ff_mask_nxv1f32(<vscale x 1 x float> %va
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg6ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -4197,7 +3991,6 @@ define <vscale x 1 x float> @test_vlseg7ff_nxv1f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg7ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -4211,16 +4004,15 @@ define <vscale x 1 x float> @test_vlseg7ff_mask_nxv1f32(<vscale x 1 x float> %va
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg7ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -4240,7 +4032,6 @@ define <vscale x 1 x float> @test_vlseg8ff_nxv1f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg8ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %vl)
@@ -4254,17 +4045,16 @@ define <vscale x 1 x float> @test_vlseg8ff_mask_nxv1f32(<vscale x 1 x float> %va
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i32} @llvm.riscv.vlseg8ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -4284,7 +4074,6 @@ define <vscale x 8 x half> @test_vlseg2ff_nxv8f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg2e16ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg2ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
@@ -4302,7 +4091,6 @@ define <vscale x 8 x half> @test_vlseg2ff_mask_nxv8f16(<vscale x 8 x half> %val,
 ; CHECK-NEXT:    vlseg2e16ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -4322,7 +4110,6 @@ define <vscale x 8 x half> @test_vlseg3ff_nxv8f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg3e16ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg3ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
@@ -4336,12 +4123,11 @@ define <vscale x 8 x half> @test_vlseg3ff_mask_nxv8f16(<vscale x 8 x half> %val,
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -4361,7 +4147,6 @@ define <vscale x 8 x half> @test_vlseg4ff_nxv8f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg4e16ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg4ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %vl)
@@ -4375,13 +4160,12 @@ define <vscale x 8 x half> @test_vlseg4ff_mask_nxv8f16(<vscale x 8 x half> %val,
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -4401,7 +4185,6 @@ define <vscale x 8 x float> @test_vlseg2ff_nxv8f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg2e32ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>, i32} @llvm.riscv.vlseg2ff.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i32 %vl)
@@ -4419,7 +4202,6 @@ define <vscale x 8 x float> @test_vlseg2ff_mask_nxv8f32(<vscale x 8 x float> %va
 ; CHECK-NEXT:    vlseg2e32ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -4439,7 +4221,6 @@ define <vscale x 2 x double> @test_vlseg2ff_nxv2f64(ptr %base, i32 %vl, ptr %out
 ; CHECK-NEXT:    vlseg2e64ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg2ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
@@ -4457,7 +4238,6 @@ define <vscale x 2 x double> @test_vlseg2ff_mask_nxv2f64(<vscale x 2 x double> %
 ; CHECK-NEXT:    vlseg2e64ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -4477,7 +4257,6 @@ define <vscale x 2 x double> @test_vlseg3ff_nxv2f64(ptr %base, i32 %vl, ptr %out
 ; CHECK-NEXT:    vlseg3e64ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg3ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
@@ -4491,12 +4270,11 @@ define <vscale x 2 x double> @test_vlseg3ff_mask_nxv2f64(<vscale x 2 x double> %
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vlseg3e64ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -4516,7 +4294,6 @@ define <vscale x 2 x double> @test_vlseg4ff_nxv2f64(ptr %base, i32 %vl, ptr %out
 ; CHECK-NEXT:    vlseg4e64ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg4ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %vl)
@@ -4530,13 +4307,12 @@ define <vscale x 2 x double> @test_vlseg4ff_mask_nxv2f64(<vscale x 2 x double> %
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vlseg4e64ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -4556,7 +4332,6 @@ define <vscale x 4 x half> @test_vlseg2ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg2ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4574,7 +4349,6 @@ define <vscale x 4 x half> @test_vlseg2ff_mask_nxv4f16(<vscale x 4 x half> %val,
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4594,7 +4368,6 @@ define <vscale x 4 x half> @test_vlseg3ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg3ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4608,12 +4381,11 @@ define <vscale x 4 x half> @test_vlseg3ff_mask_nxv4f16(<vscale x 4 x half> %val,
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4633,7 +4405,6 @@ define <vscale x 4 x half> @test_vlseg4ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg4ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4647,13 +4418,12 @@ define <vscale x 4 x half> @test_vlseg4ff_mask_nxv4f16(<vscale x 4 x half> %val,
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4673,7 +4443,6 @@ define <vscale x 4 x half> @test_vlseg5ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg5ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4687,14 +4456,13 @@ define <vscale x 4 x half> @test_vlseg5ff_mask_nxv4f16(<vscale x 4 x half> %val,
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4714,7 +4482,6 @@ define <vscale x 4 x half> @test_vlseg6ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg6ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4728,15 +4495,14 @@ define <vscale x 4 x half> @test_vlseg6ff_mask_nxv4f16(<vscale x 4 x half> %val,
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4756,7 +4522,6 @@ define <vscale x 4 x half> @test_vlseg7ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg7ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4770,16 +4535,15 @@ define <vscale x 4 x half> @test_vlseg7ff_mask_nxv4f16(<vscale x 4 x half> %val,
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4799,7 +4563,6 @@ define <vscale x 4 x half> @test_vlseg8ff_nxv4f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg8ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %vl)
@@ -4813,17 +4576,16 @@ define <vscale x 4 x half> @test_vlseg8ff_mask_nxv4f16(<vscale x 4 x half> %val,
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4843,7 +4605,6 @@ define <vscale x 2 x half> @test_vlseg2ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg2ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4861,7 +4622,6 @@ define <vscale x 2 x half> @test_vlseg2ff_mask_nxv2f16(<vscale x 2 x half> %val,
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg2ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -4881,7 +4641,6 @@ define <vscale x 2 x half> @test_vlseg3ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg3ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4895,12 +4654,11 @@ define <vscale x 2 x half> @test_vlseg3ff_mask_nxv2f16(<vscale x 2 x half> %val,
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg3ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -4920,7 +4678,6 @@ define <vscale x 2 x half> @test_vlseg4ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg4ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4934,13 +4691,12 @@ define <vscale x 2 x half> @test_vlseg4ff_mask_nxv2f16(<vscale x 2 x half> %val,
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg4ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -4960,7 +4716,6 @@ define <vscale x 2 x half> @test_vlseg5ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg5ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -4974,14 +4729,13 @@ define <vscale x 2 x half> @test_vlseg5ff_mask_nxv2f16(<vscale x 2 x half> %val,
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg5ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5001,7 +4755,6 @@ define <vscale x 2 x half> @test_vlseg6ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg6ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -5015,15 +4768,14 @@ define <vscale x 2 x half> @test_vlseg6ff_mask_nxv2f16(<vscale x 2 x half> %val,
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg6ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5043,7 +4795,6 @@ define <vscale x 2 x half> @test_vlseg7ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg7ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -5057,16 +4808,15 @@ define <vscale x 2 x half> @test_vlseg7ff_mask_nxv2f16(<vscale x 2 x half> %val,
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg7ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5086,7 +4836,6 @@ define <vscale x 2 x half> @test_vlseg8ff_nxv2f16(ptr %base, i32 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg8ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %vl)
@@ -5100,17 +4849,16 @@ define <vscale x 2 x half> @test_vlseg8ff_mask_nxv2f16(<vscale x 2 x half> %val,
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i32} @llvm.riscv.vlseg8ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5130,7 +4878,6 @@ define <vscale x 4 x float> @test_vlseg2ff_nxv4f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg2e32ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg2ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
@@ -5148,7 +4895,6 @@ define <vscale x 4 x float> @test_vlseg2ff_mask_nxv4f32(<vscale x 4 x float> %va
 ; CHECK-NEXT:    vlseg2e32ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg2ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -5168,7 +4914,6 @@ define <vscale x 4 x float> @test_vlseg3ff_nxv4f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg3e32ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg3ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
@@ -5182,12 +4927,11 @@ define <vscale x 4 x float> @test_vlseg3ff_mask_nxv4f32(<vscale x 4 x float> %va
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vlseg3e32ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg3ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -5207,7 +4951,6 @@ define <vscale x 4 x float> @test_vlseg4ff_nxv4f32(ptr %base, i32 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg4e32ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg4ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
@@ -5221,13 +4964,12 @@ define <vscale x 4 x float> @test_vlseg4ff_mask_nxv4f32(<vscale x 4 x float> %va
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vlseg4e32ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sw a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg4ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
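The hunks in this file show the two patterns that recur throughout the patch: the no-op "# kill:" markers for the segment-load tuples disappear, and the vmv*r.v copies that splat the merge operand now read the original source register v8 instead of chaining through the first copy (v6/v7). With subregister liveness the allocator can see that v8's value stays live as one lane of the destination tuple, which is what appears to make both rewrites possible. As a minimal sketch of the kind of input these tests exercise, modeled directly on the intrinsic signatures above (the function name and llc flags here are illustrative, not taken from the patch):

    ; llc -mtriple=riscv32 -mattr=+v reproducer.ll
    declare {<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg2ff.nxv4f32(<vscale x 4 x float>, <vscale x 4 x float>, ptr, i32)

    define <vscale x 4 x float> @second_field(ptr %base, i32 %vl, ptr %outvl) {
    entry:
      ; Fault-only-first segment load of two fields; only field 1 and the
      ; trimmed VL are used, so field 0 of the result tuple is dead.
      %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i32} @llvm.riscv.vlseg2ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %vl)
      %1 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 1
      %2 = extractvalue {<vscale x 4 x float>,<vscale x 4 x float>, i32} %0, 2
      store i32 %2, ptr %outvl
      ret <vscale x 4 x float> %1
    }

Since only field 1 is returned (in v8), the kill marker that used to assert v8m2's independence from the tuple v6m2_v8m2 is no longer emitted.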

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
index 2930a38d02805..3cd44f2c3bef4 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64-dead.ll
@@ -23,7 +23,6 @@ entry:
 define void @test_vlseg2ff_mask_dead_value(<vscale x 16 x i16> %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask, ptr %outvl) {
 ; CHECK-LABEL: test_vlseg2ff_mask_dead_value:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vlseg2e16ff.v v8, (a0), v0.t
@@ -42,7 +41,6 @@ define <vscale x 16 x i16> @test_vlseg2ff_dead_vl(ptr %base, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %vl)
@@ -56,7 +54,6 @@ define <vscale x 16 x i16> @test_vlseg2ff_mask_dead_vl(<vscale x 16 x i16> %val,
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -78,7 +75,6 @@ entry:
 define void @test_vlseg2ff_mask_dead_all(<vscale x 16 x i16> %val, ptr %base, i64 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vlseg2ff_mask_dead_all:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vlseg2e16ff.v v8, (a0), v0.t
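In the masked variants, every field's merge operand is the same incoming value %val (arriving in v8), so the backend must splat it across the destination tuple before the load. Previously the remaining lanes were copied from the first copy (vmv2r.v v6, v8 followed by vmv2r.v v10, v6), presumably because without subregister liveness v8 could not be proven live once it became part of the tuple; with per-lane liveness the later copies keep sourcing v8 directly and no longer form a serial dependency chain, as in the vlseg3ff hunks in the next file where vmv2r.v v10, v6 becomes vmv2r.v v10, v8. A hedged sketch of such a masked case, built from the vlseg3ff.mask signature used below (the function name is illustrative):

    declare {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, <vscale x 4 x i32>, ptr, <vscale x 4 x i1>, i64, i64)

    define <vscale x 4 x i32> @masked_field1(<vscale x 4 x i32> %val, ptr %base, i64 %vl, <vscale x 4 x i1> %mask, ptr %outvl) {
    entry:
      ; All three merge operands are %val, so the codegen splats v8 into the
      ; tuple v6m2_v8m2_v10m2 (two vmv2r copies) before the masked load.
      %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32(<vscale x 4 x i32> %val, <vscale x 4 x i32> %val, <vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
      %1 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} %0, 1
      %2 = extractvalue {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} %0, 3
      store i64 %2, ptr %outvl
      ret <vscale x 4 x i32> %1
    }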

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
index ff81faeaf4186..126975b1a60fa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsegff-rv64.ll
@@ -12,7 +12,6 @@ define <vscale x 16 x i16> @test_vlseg2ff_nxv16i16(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %vl)
@@ -30,7 +29,6 @@ define <vscale x 16 x i16> @test_vlseg2ff_mask_nxv16i16(<vscale x 16 x i16> %val
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -50,7 +48,6 @@ define <vscale x 4 x i32> @test_vlseg2ff_nxv4i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e32ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
@@ -68,7 +65,6 @@ define <vscale x 4 x i32> @test_vlseg2ff_mask_nxv4i32(<vscale x 4 x i32> %val, p
 ; CHECK-NEXT:    vlseg2e32ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -88,7 +84,6 @@ define <vscale x 4 x i32> @test_vlseg3ff_nxv4i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e32ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg3ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
@@ -102,12 +97,11 @@ define <vscale x 4 x i32> @test_vlseg3ff_mask_nxv4i32(<vscale x 4 x i32> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vlseg3e32ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg3ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -127,7 +121,6 @@ define <vscale x 4 x i32> @test_vlseg4ff_nxv4i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e32ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg4ff.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %vl)
@@ -141,13 +134,12 @@ define <vscale x 4 x i32> @test_vlseg4ff_mask_nxv4i32(<vscale x 4 x i32> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vlseg4e32ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>, i64} @llvm.riscv.vlseg4ff.mask.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -167,7 +159,6 @@ define <vscale x 16 x i8> @test_vlseg2ff_nxv16i8(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e8ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
@@ -185,7 +176,6 @@ define <vscale x 16 x i8> @test_vlseg2ff_mask_nxv16i8(<vscale x 16 x i8> %val, p
 ; CHECK-NEXT:    vlseg2e8ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -205,7 +195,6 @@ define <vscale x 16 x i8> @test_vlseg3ff_nxv16i8(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e8ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg3ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
@@ -219,12 +208,11 @@ define <vscale x 16 x i8> @test_vlseg3ff_mask_nxv16i8(<vscale x 16 x i8> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vlseg3e8ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -244,7 +232,6 @@ define <vscale x 16 x i8> @test_vlseg4ff_nxv16i8(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e8ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg4ff.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %vl)
@@ -258,13 +245,12 @@ define <vscale x 16 x i8> @test_vlseg4ff_mask_nxv16i8(<vscale x 16 x i8> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vlseg4e8ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -284,7 +270,6 @@ define <vscale x 1 x i64> @test_vlseg2ff_nxv1i64(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -302,7 +287,6 @@ define <vscale x 1 x i64> @test_vlseg2ff_mask_nxv1i64(<vscale x 1 x i64> %val, p
 ; CHECK-NEXT:    vlseg2e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -322,7 +306,6 @@ define <vscale x 1 x i64> @test_vlseg3ff_nxv1i64(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg3ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -336,12 +319,11 @@ define <vscale x 1 x i64> @test_vlseg3ff_mask_nxv1i64(<vscale x 1 x i64> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg3e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg3ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -361,7 +343,6 @@ define <vscale x 1 x i64> @test_vlseg4ff_nxv1i64(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg4ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -375,13 +356,12 @@ define <vscale x 1 x i64> @test_vlseg4ff_mask_nxv1i64(<vscale x 1 x i64> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg4e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg4ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -401,7 +381,6 @@ define <vscale x 1 x i64> @test_vlseg5ff_nxv1i64(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg5e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg5ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -415,14 +394,13 @@ define <vscale x 1 x i64> @test_vlseg5ff_mask_nxv1i64(<vscale x 1 x i64> %val, p
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg5e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg5ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -442,7 +420,6 @@ define <vscale x 1 x i64> @test_vlseg6ff_nxv1i64(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg6e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg6ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -456,15 +433,14 @@ define <vscale x 1 x i64> @test_vlseg6ff_mask_nxv1i64(<vscale x 1 x i64> %val, p
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg6e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg6ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -484,7 +460,6 @@ define <vscale x 1 x i64> @test_vlseg7ff_nxv1i64(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg7e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg7ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -498,16 +473,15 @@ define <vscale x 1 x i64> @test_vlseg7ff_mask_nxv1i64(<vscale x 1 x i64> %val, p
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg7e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg7ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -527,7 +501,6 @@ define <vscale x 1 x i64> @test_vlseg8ff_nxv1i64(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg8e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg8ff.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %vl)
@@ -541,17 +514,16 @@ define <vscale x 1 x i64> @test_vlseg8ff_mask_nxv1i64(<vscale x 1 x i64> %val, p
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg8e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>, i64} @llvm.riscv.vlseg8ff.mask.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -571,7 +543,6 @@ define <vscale x 1 x i32> @test_vlseg2ff_nxv1i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg2ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -589,7 +560,6 @@ define <vscale x 1 x i32> @test_vlseg2ff_mask_nxv1i32(<vscale x 1 x i32> %val, p
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -609,7 +579,6 @@ define <vscale x 1 x i32> @test_vlseg3ff_nxv1i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg3ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -623,12 +592,11 @@ define <vscale x 1 x i32> @test_vlseg3ff_mask_nxv1i32(<vscale x 1 x i32> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg3ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -648,7 +616,6 @@ define <vscale x 1 x i32> @test_vlseg4ff_nxv1i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg4ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -662,13 +629,12 @@ define <vscale x 1 x i32> @test_vlseg4ff_mask_nxv1i32(<vscale x 1 x i32> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg4ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -688,7 +654,6 @@ define <vscale x 1 x i32> @test_vlseg5ff_nxv1i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg5ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -702,14 +667,13 @@ define <vscale x 1 x i32> @test_vlseg5ff_mask_nxv1i32(<vscale x 1 x i32> %val, p
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg5ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -729,7 +693,6 @@ define <vscale x 1 x i32> @test_vlseg6ff_nxv1i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg6ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -743,15 +706,14 @@ define <vscale x 1 x i32> @test_vlseg6ff_mask_nxv1i32(<vscale x 1 x i32> %val, p
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg6ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -771,7 +733,6 @@ define <vscale x 1 x i32> @test_vlseg7ff_nxv1i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg7ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -785,16 +746,15 @@ define <vscale x 1 x i32> @test_vlseg7ff_mask_nxv1i32(<vscale x 1 x i32> %val, p
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg7ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -814,7 +774,6 @@ define <vscale x 1 x i32> @test_vlseg8ff_nxv1i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg8ff.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %vl)
@@ -828,17 +787,16 @@ define <vscale x 1 x i32> @test_vlseg8ff_mask_nxv1i32(<vscale x 1 x i32> %val, p
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>, i64} @llvm.riscv.vlseg8ff.mask.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -858,7 +816,6 @@ define <vscale x 8 x i16> @test_vlseg2ff_nxv8i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e16ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
@@ -876,7 +833,6 @@ define <vscale x 8 x i16> @test_vlseg2ff_mask_nxv8i16(<vscale x 8 x i16> %val, p
 ; CHECK-NEXT:    vlseg2e16ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -896,7 +852,6 @@ define <vscale x 8 x i16> @test_vlseg3ff_nxv8i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e16ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg3ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
@@ -910,12 +865,11 @@ define <vscale x 8 x i16> @test_vlseg3ff_mask_nxv8i16(<vscale x 8 x i16> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg3ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -935,7 +889,6 @@ define <vscale x 8 x i16> @test_vlseg4ff_nxv8i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e16ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg4ff.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %vl)
@@ -949,13 +902,12 @@ define <vscale x 8 x i16> @test_vlseg4ff_mask_nxv8i16(<vscale x 8 x i16> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>, i64} @llvm.riscv.vlseg4ff.mask.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -975,7 +927,6 @@ define <vscale x 4 x i8> @test_vlseg2ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg2ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -993,7 +944,6 @@ define <vscale x 4 x i8> @test_vlseg2ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -1013,7 +963,6 @@ define <vscale x 4 x i8> @test_vlseg3ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg3ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -1027,12 +976,11 @@ define <vscale x 4 x i8> @test_vlseg3ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -1052,7 +1000,6 @@ define <vscale x 4 x i8> @test_vlseg4ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg4ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -1066,13 +1013,12 @@ define <vscale x 4 x i8> @test_vlseg4ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -1092,7 +1038,6 @@ define <vscale x 4 x i8> @test_vlseg5ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg5ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -1106,14 +1051,13 @@ define <vscale x 4 x i8> @test_vlseg5ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg5ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -1133,7 +1077,6 @@ define <vscale x 4 x i8> @test_vlseg6ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg6ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -1147,15 +1090,14 @@ define <vscale x 4 x i8> @test_vlseg6ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg6ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -1175,7 +1117,6 @@ define <vscale x 4 x i8> @test_vlseg7ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg7ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -1189,16 +1130,15 @@ define <vscale x 4 x i8> @test_vlseg7ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg7ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -1218,7 +1158,6 @@ define <vscale x 4 x i8> @test_vlseg8ff_nxv4i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg8ff.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %vl)
@@ -1232,17 +1171,16 @@ define <vscale x 4 x i8> @test_vlseg8ff_mask_nxv4i8(<vscale x 4 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>, i64} @llvm.riscv.vlseg8ff.mask.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -1262,7 +1200,6 @@ define <vscale x 1 x i16> @test_vlseg2ff_nxv1i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg2ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1280,7 +1217,6 @@ define <vscale x 1 x i16> @test_vlseg2ff_mask_nxv1i16(<vscale x 1 x i16> %val, p
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1300,7 +1236,6 @@ define <vscale x 1 x i16> @test_vlseg3ff_nxv1i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg3ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1314,12 +1249,11 @@ define <vscale x 1 x i16> @test_vlseg3ff_mask_nxv1i16(<vscale x 1 x i16> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg3ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1339,7 +1273,6 @@ define <vscale x 1 x i16> @test_vlseg4ff_nxv1i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg4ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1353,13 +1286,12 @@ define <vscale x 1 x i16> @test_vlseg4ff_mask_nxv1i16(<vscale x 1 x i16> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg4ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1379,7 +1311,6 @@ define <vscale x 1 x i16> @test_vlseg5ff_nxv1i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg5ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1393,14 +1324,13 @@ define <vscale x 1 x i16> @test_vlseg5ff_mask_nxv1i16(<vscale x 1 x i16> %val, p
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg5ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1420,7 +1350,6 @@ define <vscale x 1 x i16> @test_vlseg6ff_nxv1i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg6ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1434,15 +1363,14 @@ define <vscale x 1 x i16> @test_vlseg6ff_mask_nxv1i16(<vscale x 1 x i16> %val, p
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg6ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1462,7 +1390,6 @@ define <vscale x 1 x i16> @test_vlseg7ff_nxv1i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg7ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1476,16 +1403,15 @@ define <vscale x 1 x i16> @test_vlseg7ff_mask_nxv1i16(<vscale x 1 x i16> %val, p
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg7ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1505,7 +1431,6 @@ define <vscale x 1 x i16> @test_vlseg8ff_nxv1i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg8ff.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %vl)
@@ -1519,17 +1444,16 @@ define <vscale x 1 x i16> @test_vlseg8ff_mask_nxv1i16(<vscale x 1 x i16> %val, p
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>, i64} @llvm.riscv.vlseg8ff.mask.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1549,7 +1473,6 @@ define <vscale x 2 x i32> @test_vlseg2ff_nxv2i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1567,7 +1490,6 @@ define <vscale x 2 x i32> @test_vlseg2ff_mask_nxv2i32(<vscale x 2 x i32> %val, p
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -1587,7 +1509,6 @@ define <vscale x 2 x i32> @test_vlseg3ff_nxv2i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg3ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1601,12 +1522,11 @@ define <vscale x 2 x i32> @test_vlseg3ff_mask_nxv2i32(<vscale x 2 x i32> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg3ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -1626,7 +1546,6 @@ define <vscale x 2 x i32> @test_vlseg4ff_nxv2i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg4ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1640,13 +1559,12 @@ define <vscale x 2 x i32> @test_vlseg4ff_mask_nxv2i32(<vscale x 2 x i32> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg4ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -1666,7 +1584,6 @@ define <vscale x 2 x i32> @test_vlseg5ff_nxv2i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg5ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1680,14 +1597,13 @@ define <vscale x 2 x i32> @test_vlseg5ff_mask_nxv2i32(<vscale x 2 x i32> %val, p
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg5ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -1707,7 +1623,6 @@ define <vscale x 2 x i32> @test_vlseg6ff_nxv2i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg6ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1721,15 +1636,14 @@ define <vscale x 2 x i32> @test_vlseg6ff_mask_nxv2i32(<vscale x 2 x i32> %val, p
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg6ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -1749,7 +1663,6 @@ define <vscale x 2 x i32> @test_vlseg7ff_nxv2i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg7ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1763,16 +1676,15 @@ define <vscale x 2 x i32> @test_vlseg7ff_mask_nxv2i32(<vscale x 2 x i32> %val, p
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg7ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -1792,7 +1704,6 @@ define <vscale x 2 x i32> @test_vlseg8ff_nxv2i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg8ff.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %vl)
@@ -1806,17 +1717,16 @@ define <vscale x 2 x i32> @test_vlseg8ff_mask_nxv2i32(<vscale x 2 x i32> %val, p
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>, i64} @llvm.riscv.vlseg8ff.mask.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -1836,7 +1746,6 @@ define <vscale x 8 x i8> @test_vlseg2ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1854,7 +1763,6 @@ define <vscale x 8 x i8> @test_vlseg2ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -1874,7 +1782,6 @@ define <vscale x 8 x i8> @test_vlseg3ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg3ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1888,12 +1795,11 @@ define <vscale x 8 x i8> @test_vlseg3ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -1913,7 +1819,6 @@ define <vscale x 8 x i8> @test_vlseg4ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg4ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1927,13 +1832,12 @@ define <vscale x 8 x i8> @test_vlseg4ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -1953,7 +1857,6 @@ define <vscale x 8 x i8> @test_vlseg5ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg5ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -1967,14 +1870,13 @@ define <vscale x 8 x i8> @test_vlseg5ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg5ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -1994,7 +1896,6 @@ define <vscale x 8 x i8> @test_vlseg6ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg6ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -2008,15 +1909,14 @@ define <vscale x 8 x i8> @test_vlseg6ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg6ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2036,7 +1936,6 @@ define <vscale x 8 x i8> @test_vlseg7ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg7ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -2050,16 +1949,15 @@ define <vscale x 8 x i8> @test_vlseg7ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg7ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2079,7 +1977,6 @@ define <vscale x 8 x i8> @test_vlseg8ff_nxv8i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg8ff.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %vl)
@@ -2093,17 +1990,16 @@ define <vscale x 8 x i8> @test_vlseg8ff_mask_nxv8i8(<vscale x 8 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>, i64} @llvm.riscv.vlseg8ff.mask.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2123,7 +2019,6 @@ define <vscale x 4 x i64> @test_vlseg2ff_nxv4i64(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e64ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, i64 %vl)
@@ -2141,7 +2036,6 @@ define <vscale x 4 x i64> @test_vlseg2ff_mask_nxv4i64(<vscale x 4 x i64> %val, p
 ; CHECK-NEXT:    vlseg2e64ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -2161,7 +2055,6 @@ define <vscale x 4 x i16> @test_vlseg2ff_nxv4i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2179,7 +2072,6 @@ define <vscale x 4 x i16> @test_vlseg2ff_mask_nxv4i16(<vscale x 4 x i16> %val, p
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -2199,7 +2091,6 @@ define <vscale x 4 x i16> @test_vlseg3ff_nxv4i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg3ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2213,12 +2104,11 @@ define <vscale x 4 x i16> @test_vlseg3ff_mask_nxv4i16(<vscale x 4 x i16> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg3ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -2238,7 +2128,6 @@ define <vscale x 4 x i16> @test_vlseg4ff_nxv4i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg4ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2252,13 +2141,12 @@ define <vscale x 4 x i16> @test_vlseg4ff_mask_nxv4i16(<vscale x 4 x i16> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg4ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -2278,7 +2166,6 @@ define <vscale x 4 x i16> @test_vlseg5ff_nxv4i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg5ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2292,14 +2179,13 @@ define <vscale x 4 x i16> @test_vlseg5ff_mask_nxv4i16(<vscale x 4 x i16> %val, p
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg5ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -2319,7 +2205,6 @@ define <vscale x 4 x i16> @test_vlseg6ff_nxv4i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg6ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2333,15 +2218,14 @@ define <vscale x 4 x i16> @test_vlseg6ff_mask_nxv4i16(<vscale x 4 x i16> %val, p
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg6ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -2361,7 +2245,6 @@ define <vscale x 4 x i16> @test_vlseg7ff_nxv4i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg7ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2375,16 +2258,15 @@ define <vscale x 4 x i16> @test_vlseg7ff_mask_nxv4i16(<vscale x 4 x i16> %val, p
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg7ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -2404,7 +2286,6 @@ define <vscale x 4 x i16> @test_vlseg8ff_nxv4i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg8ff.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %vl)
@@ -2418,17 +2299,16 @@ define <vscale x 4 x i16> @test_vlseg8ff_mask_nxv4i16(<vscale x 4 x i16> %val, p
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>, i64} @llvm.riscv.vlseg8ff.mask.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -2448,7 +2328,6 @@ define <vscale x 1 x i8> @test_vlseg2ff_nxv1i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg2ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2466,7 +2345,6 @@ define <vscale x 1 x i8> @test_vlseg2ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -2486,7 +2364,6 @@ define <vscale x 1 x i8> @test_vlseg3ff_nxv1i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg3ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2500,12 +2377,11 @@ define <vscale x 1 x i8> @test_vlseg3ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -2525,7 +2401,6 @@ define <vscale x 1 x i8> @test_vlseg4ff_nxv1i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg4ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2539,13 +2414,12 @@ define <vscale x 1 x i8> @test_vlseg4ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -2565,7 +2439,6 @@ define <vscale x 1 x i8> @test_vlseg5ff_nxv1i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg5ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2579,14 +2452,13 @@ define <vscale x 1 x i8> @test_vlseg5ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg5ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -2606,7 +2478,6 @@ define <vscale x 1 x i8> @test_vlseg6ff_nxv1i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg6ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2620,15 +2491,14 @@ define <vscale x 1 x i8> @test_vlseg6ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg6ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -2648,7 +2518,6 @@ define <vscale x 1 x i8> @test_vlseg7ff_nxv1i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg7ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2662,16 +2531,15 @@ define <vscale x 1 x i8> @test_vlseg7ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg7ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -2691,7 +2559,6 @@ define <vscale x 1 x i8> @test_vlseg8ff_nxv1i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg8ff.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %vl)
@@ -2705,17 +2572,16 @@ define <vscale x 1 x i8> @test_vlseg8ff_mask_nxv1i8(<vscale x 1 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>, i64} @llvm.riscv.vlseg8ff.mask.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -2735,7 +2601,6 @@ define <vscale x 2 x i8> @test_vlseg2ff_nxv2i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg2ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2753,7 +2618,6 @@ define <vscale x 2 x i8> @test_vlseg2ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr
 ; CHECK-NEXT:    vlseg2e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -2773,7 +2637,6 @@ define <vscale x 2 x i8> @test_vlseg3ff_nxv2i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg3ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2787,12 +2650,11 @@ define <vscale x 2 x i8> @test_vlseg3ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vlseg3e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg3ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -2812,7 +2674,6 @@ define <vscale x 2 x i8> @test_vlseg4ff_nxv2i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg4ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2826,13 +2687,12 @@ define <vscale x 2 x i8> @test_vlseg4ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vlseg4e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg4ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -2852,7 +2712,6 @@ define <vscale x 2 x i8> @test_vlseg5ff_nxv2i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg5ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2866,14 +2725,13 @@ define <vscale x 2 x i8> @test_vlseg5ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vlseg5e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg5ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -2893,7 +2751,6 @@ define <vscale x 2 x i8> @test_vlseg6ff_nxv2i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg6ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2907,15 +2764,14 @@ define <vscale x 2 x i8> @test_vlseg6ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vlseg6e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg6ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -2935,7 +2791,6 @@ define <vscale x 2 x i8> @test_vlseg7ff_nxv2i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg7ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2949,16 +2804,15 @@ define <vscale x 2 x i8> @test_vlseg7ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vlseg7e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg7ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -2978,7 +2832,6 @@ define <vscale x 2 x i8> @test_vlseg8ff_nxv2i8(ptr %base, i64 %vl, ptr %outvl) {
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg8ff.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %vl)
@@ -2992,17 +2845,16 @@ define <vscale x 2 x i8> @test_vlseg8ff_mask_nxv2i8(<vscale x 2 x i8> %val, ptr
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vlseg8e8ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>, i64} @llvm.riscv.vlseg8ff.mask.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -3022,7 +2874,6 @@ define <vscale x 8 x i32> @test_vlseg2ff_nxv8i32(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e32ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>, i64} @llvm.riscv.vlseg2ff.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i64 %vl)
@@ -3040,7 +2891,6 @@ define <vscale x 8 x i32> @test_vlseg2ff_mask_nxv8i32(<vscale x 8 x i32> %val, p
 ; CHECK-NEXT:    vlseg2e32ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>, i64} @llvm.riscv.vlseg2ff.mask.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -3060,7 +2910,6 @@ define <vscale x 32 x i8> @test_vlseg2ff_nxv32i8(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e8ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i64 %vl)
@@ -3078,7 +2927,6 @@ define <vscale x 32 x i8> @test_vlseg2ff_mask_nxv32i8(<vscale x 32 x i8> %val, p
 ; CHECK-NEXT:    vlseg2e8ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>, i64} @llvm.riscv.vlseg2ff.mask.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
@@ -3098,7 +2946,6 @@ define <vscale x 2 x i16> @test_vlseg2ff_nxv2i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg2ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -3116,7 +2963,6 @@ define <vscale x 2 x i16> @test_vlseg2ff_mask_nxv2i16(<vscale x 2 x i16> %val, p
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -3136,7 +2982,6 @@ define <vscale x 2 x i16> @test_vlseg3ff_nxv2i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg3ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -3150,12 +2995,11 @@ define <vscale x 2 x i16> @test_vlseg3ff_mask_nxv2i16(<vscale x 2 x i16> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg3ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -3175,7 +3019,6 @@ define <vscale x 2 x i16> @test_vlseg4ff_nxv2i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg4ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -3189,13 +3032,12 @@ define <vscale x 2 x i16> @test_vlseg4ff_mask_nxv2i16(<vscale x 2 x i16> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg4ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -3215,7 +3057,6 @@ define <vscale x 2 x i16> @test_vlseg5ff_nxv2i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg5ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -3229,14 +3070,13 @@ define <vscale x 2 x i16> @test_vlseg5ff_mask_nxv2i16(<vscale x 2 x i16> %val, p
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg5ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -3256,7 +3096,6 @@ define <vscale x 2 x i16> @test_vlseg6ff_nxv2i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg6ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -3270,15 +3109,14 @@ define <vscale x 2 x i16> @test_vlseg6ff_mask_nxv2i16(<vscale x 2 x i16> %val, p
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg6ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -3298,7 +3136,6 @@ define <vscale x 2 x i16> @test_vlseg7ff_nxv2i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg7ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -3312,16 +3149,15 @@ define <vscale x 2 x i16> @test_vlseg7ff_mask_nxv2i16(<vscale x 2 x i16> %val, p
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg7ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -3341,7 +3177,6 @@ define <vscale x 2 x i16> @test_vlseg8ff_nxv2i16(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg8ff.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %vl)
@@ -3355,17 +3190,16 @@ define <vscale x 2 x i16> @test_vlseg8ff_mask_nxv2i16(<vscale x 2 x i16> %val, p
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>, i64} @llvm.riscv.vlseg8ff.mask.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -3385,7 +3219,6 @@ define <vscale x 2 x i64> @test_vlseg2ff_nxv2i64(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg2e64ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
@@ -3403,7 +3236,6 @@ define <vscale x 2 x i64> @test_vlseg2ff_mask_nxv2i64(<vscale x 2 x i64> %val, p
 ; CHECK-NEXT:    vlseg2e64ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg2ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -3423,7 +3255,6 @@ define <vscale x 2 x i64> @test_vlseg3ff_nxv2i64(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg3e64ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg3ff.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
@@ -3437,12 +3268,11 @@ define <vscale x 2 x i64> @test_vlseg3ff_mask_nxv2i64(<vscale x 2 x i64> %val, p
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vlseg3e64ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg3ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -3462,7 +3292,6 @@ define <vscale x 2 x i64> @test_vlseg4ff_nxv2i64(ptr %base, i64 %vl, ptr %outvl)
 ; CHECK-NEXT:    vlseg4e64ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg4ff.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %vl)
@@ -3476,13 +3305,12 @@ define <vscale x 2 x i64> @test_vlseg4ff_mask_nxv2i64(<vscale x 2 x i64> %val, p
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vlseg4e64ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>, i64} @llvm.riscv.vlseg4ff.mask.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -3502,7 +3330,6 @@ define <vscale x 16 x half> @test_vlseg2ff_nxv16f16(ptr %base, i64 %vl, ptr %out
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>, i64} @llvm.riscv.vlseg2ff.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i64 %vl)
@@ -3520,7 +3347,6 @@ define <vscale x 16 x half> @test_vlseg2ff_mask_nxv16f16(<vscale x 16 x half> %v
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv16f16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -3540,7 +3366,6 @@ define <vscale x 4 x double> @test_vlseg2ff_nxv4f64(ptr %base, i64 %vl, ptr %out
 ; CHECK-NEXT:    vlseg2e64ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>, i64} @llvm.riscv.vlseg2ff.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i64 %vl)
@@ -3558,7 +3383,6 @@ define <vscale x 4 x double> @test_vlseg2ff_mask_nxv4f64(<vscale x 4 x double> %
 ; CHECK-NEXT:    vlseg2e64ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>, i64} @llvm.riscv.vlseg2ff.mask.nxv4f64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3578,7 +3402,6 @@ define <vscale x 1 x double> @test_vlseg2ff_nxv1f64(ptr %base, i64 %vl, ptr %out
 ; CHECK-NEXT:    vlseg2e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg2ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3596,7 +3419,6 @@ define <vscale x 1 x double> @test_vlseg2ff_mask_nxv1f64(<vscale x 1 x double> %
 ; CHECK-NEXT:    vlseg2e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg2ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -3616,7 +3438,6 @@ define <vscale x 1 x double> @test_vlseg3ff_nxv1f64(ptr %base, i64 %vl, ptr %out
 ; CHECK-NEXT:    vlseg3e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg3ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3630,12 +3451,11 @@ define <vscale x 1 x double> @test_vlseg3ff_mask_nxv1f64(<vscale x 1 x double> %
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg3e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg3ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -3655,7 +3475,6 @@ define <vscale x 1 x double> @test_vlseg4ff_nxv1f64(ptr %base, i64 %vl, ptr %out
 ; CHECK-NEXT:    vlseg4e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg4ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3669,13 +3488,12 @@ define <vscale x 1 x double> @test_vlseg4ff_mask_nxv1f64(<vscale x 1 x double> %
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg4e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg4ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -3695,7 +3513,6 @@ define <vscale x 1 x double> @test_vlseg5ff_nxv1f64(ptr %base, i64 %vl, ptr %out
 ; CHECK-NEXT:    vlseg5e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg5ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3709,14 +3526,13 @@ define <vscale x 1 x double> @test_vlseg5ff_mask_nxv1f64(<vscale x 1 x double> %
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg5e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg5ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -3736,7 +3552,6 @@ define <vscale x 1 x double> @test_vlseg6ff_nxv1f64(ptr %base, i64 %vl, ptr %out
 ; CHECK-NEXT:    vlseg6e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg6ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3750,15 +3565,14 @@ define <vscale x 1 x double> @test_vlseg6ff_mask_nxv1f64(<vscale x 1 x double> %
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg6e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg6ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -3778,7 +3592,6 @@ define <vscale x 1 x double> @test_vlseg7ff_nxv1f64(ptr %base, i64 %vl, ptr %out
 ; CHECK-NEXT:    vlseg7e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg7ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3792,16 +3605,15 @@ define <vscale x 1 x double> @test_vlseg7ff_mask_nxv1f64(<vscale x 1 x double> %
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg7e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg7ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -3821,7 +3633,6 @@ define <vscale x 1 x double> @test_vlseg8ff_nxv1f64(ptr %base, i64 %vl, ptr %out
 ; CHECK-NEXT:    vlseg8e64ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg8ff.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %vl)
@@ -3835,17 +3646,16 @@ define <vscale x 1 x double> @test_vlseg8ff_mask_nxv1f64(<vscale x 1 x double> %
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vlseg8e64ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>, i64} @llvm.riscv.vlseg8ff.mask.nxv1f64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -3865,7 +3675,6 @@ define <vscale x 2 x float> @test_vlseg2ff_nxv2f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg2ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3883,7 +3692,6 @@ define <vscale x 2 x float> @test_vlseg2ff_mask_nxv2f32(<vscale x 2 x float> %va
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg2ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -3903,7 +3711,6 @@ define <vscale x 2 x float> @test_vlseg3ff_nxv2f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg3ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3917,12 +3724,11 @@ define <vscale x 2 x float> @test_vlseg3ff_mask_nxv2f32(<vscale x 2 x float> %va
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg3ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -3942,7 +3748,6 @@ define <vscale x 2 x float> @test_vlseg4ff_nxv2f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg4ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3956,13 +3761,12 @@ define <vscale x 2 x float> @test_vlseg4ff_mask_nxv2f32(<vscale x 2 x float> %va
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg4ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -3982,7 +3786,6 @@ define <vscale x 2 x float> @test_vlseg5ff_nxv2f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg5ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -3996,14 +3799,13 @@ define <vscale x 2 x float> @test_vlseg5ff_mask_nxv2f32(<vscale x 2 x float> %va
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg5ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -4023,7 +3825,6 @@ define <vscale x 2 x float> @test_vlseg6ff_nxv2f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg6ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -4037,15 +3838,14 @@ define <vscale x 2 x float> @test_vlseg6ff_mask_nxv2f32(<vscale x 2 x float> %va
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg6ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -4065,7 +3865,6 @@ define <vscale x 2 x float> @test_vlseg7ff_nxv2f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg7ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -4079,16 +3878,15 @@ define <vscale x 2 x float> @test_vlseg7ff_mask_nxv2f32(<vscale x 2 x float> %va
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg7ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -4108,7 +3906,6 @@ define <vscale x 2 x float> @test_vlseg8ff_nxv2f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg8ff.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %vl)
@@ -4122,17 +3919,16 @@ define <vscale x 2 x float> @test_vlseg8ff_mask_nxv2f32(<vscale x 2 x float> %va
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>, i64} @llvm.riscv.vlseg8ff.mask.nxv2f32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -4152,7 +3948,6 @@ define <vscale x 1 x half> @test_vlseg2ff_nxv1f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg2ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -4170,7 +3965,6 @@ define <vscale x 1 x half> @test_vlseg2ff_mask_nxv1f16(<vscale x 1 x half> %val,
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4190,7 +3984,6 @@ define <vscale x 1 x half> @test_vlseg3ff_nxv1f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg3ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -4204,12 +3997,11 @@ define <vscale x 1 x half> @test_vlseg3ff_mask_nxv1f16(<vscale x 1 x half> %val,
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg3ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4229,7 +4021,6 @@ define <vscale x 1 x half> @test_vlseg4ff_nxv1f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg4ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -4243,13 +4034,12 @@ define <vscale x 1 x half> @test_vlseg4ff_mask_nxv1f16(<vscale x 1 x half> %val,
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg4ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4269,7 +4059,6 @@ define <vscale x 1 x half> @test_vlseg5ff_nxv1f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg5ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -4283,14 +4072,13 @@ define <vscale x 1 x half> @test_vlseg5ff_mask_nxv1f16(<vscale x 1 x half> %val,
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg5ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4310,7 +4098,6 @@ define <vscale x 1 x half> @test_vlseg6ff_nxv1f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg6ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -4324,15 +4111,14 @@ define <vscale x 1 x half> @test_vlseg6ff_mask_nxv1f16(<vscale x 1 x half> %val,
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg6ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4352,7 +4138,6 @@ define <vscale x 1 x half> @test_vlseg7ff_nxv1f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg7ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -4366,16 +4151,15 @@ define <vscale x 1 x half> @test_vlseg7ff_mask_nxv1f16(<vscale x 1 x half> %val,
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg7ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4395,7 +4179,6 @@ define <vscale x 1 x half> @test_vlseg8ff_nxv1f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg8ff.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %vl)
@@ -4409,17 +4192,16 @@ define <vscale x 1 x half> @test_vlseg8ff_mask_nxv1f16(<vscale x 1 x half> %val,
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>, i64} @llvm.riscv.vlseg8ff.mask.nxv1f16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4439,7 +4221,6 @@ define <vscale x 1 x float> @test_vlseg2ff_nxv1f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg2ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4457,7 +4238,6 @@ define <vscale x 1 x float> @test_vlseg2ff_mask_nxv1f32(<vscale x 1 x float> %va
 ; CHECK-NEXT:    vlseg2e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg2ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4477,7 +4257,6 @@ define <vscale x 1 x float> @test_vlseg3ff_nxv1f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg3ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4491,12 +4270,11 @@ define <vscale x 1 x float> @test_vlseg3ff_mask_nxv1f32(<vscale x 1 x float> %va
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg3e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg3ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4516,7 +4294,6 @@ define <vscale x 1 x float> @test_vlseg4ff_nxv1f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg4ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4530,13 +4307,12 @@ define <vscale x 1 x float> @test_vlseg4ff_mask_nxv1f32(<vscale x 1 x float> %va
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg4e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg4ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4556,7 +4332,6 @@ define <vscale x 1 x float> @test_vlseg5ff_nxv1f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg5ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4570,14 +4345,13 @@ define <vscale x 1 x float> @test_vlseg5ff_mask_nxv1f32(<vscale x 1 x float> %va
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg5e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg5ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4597,7 +4371,6 @@ define <vscale x 1 x float> @test_vlseg6ff_nxv1f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg6ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4611,15 +4384,14 @@ define <vscale x 1 x float> @test_vlseg6ff_mask_nxv1f32(<vscale x 1 x float> %va
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg6e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg6ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4639,7 +4411,6 @@ define <vscale x 1 x float> @test_vlseg7ff_nxv1f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg7ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4653,16 +4424,15 @@ define <vscale x 1 x float> @test_vlseg7ff_mask_nxv1f32(<vscale x 1 x float> %va
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg7e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg7ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4682,7 +4452,6 @@ define <vscale x 1 x float> @test_vlseg8ff_nxv1f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg8ff.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %vl)
@@ -4696,17 +4465,16 @@ define <vscale x 1 x float> @test_vlseg8ff_mask_nxv1f32(<vscale x 1 x float> %va
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vlseg8e32ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>, i64} @llvm.riscv.vlseg8ff.mask.nxv1f32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4726,7 +4494,6 @@ define <vscale x 8 x half> @test_vlseg2ff_nxv8f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg2e16ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg2ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
@@ -4744,7 +4511,6 @@ define <vscale x 8 x half> @test_vlseg2ff_mask_nxv8f16(<vscale x 8 x half> %val,
 ; CHECK-NEXT:    vlseg2e16ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -4764,7 +4530,6 @@ define <vscale x 8 x half> @test_vlseg3ff_nxv8f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg3e16ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg3ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
@@ -4778,12 +4543,11 @@ define <vscale x 8 x half> @test_vlseg3ff_mask_nxv8f16(<vscale x 8 x half> %val,
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg3ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -4803,7 +4567,6 @@ define <vscale x 8 x half> @test_vlseg4ff_nxv8f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg4e16ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg4ff.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %vl)
@@ -4817,13 +4580,12 @@ define <vscale x 8 x half> @test_vlseg4ff_mask_nxv8f16(<vscale x 8 x half> %val,
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>, i64} @llvm.riscv.vlseg4ff.mask.nxv8f16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -4843,7 +4605,6 @@ define <vscale x 8 x float> @test_vlseg2ff_nxv8f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg2e32ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>, i64} @llvm.riscv.vlseg2ff.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i64 %vl)
@@ -4861,7 +4622,6 @@ define <vscale x 8 x float> @test_vlseg2ff_mask_nxv8f32(<vscale x 8 x float> %va
 ; CHECK-NEXT:    vlseg2e32ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>, i64} @llvm.riscv.vlseg2ff.mask.nxv8f32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -4881,7 +4641,6 @@ define <vscale x 2 x double> @test_vlseg2ff_nxv2f64(ptr %base, i64 %vl, ptr %out
 ; CHECK-NEXT:    vlseg2e64ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg2ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
@@ -4899,7 +4658,6 @@ define <vscale x 2 x double> @test_vlseg2ff_mask_nxv2f64(<vscale x 2 x double> %
 ; CHECK-NEXT:    vlseg2e64ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg2ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -4919,7 +4677,6 @@ define <vscale x 2 x double> @test_vlseg3ff_nxv2f64(ptr %base, i64 %vl, ptr %out
 ; CHECK-NEXT:    vlseg3e64ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg3ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
@@ -4933,12 +4690,11 @@ define <vscale x 2 x double> @test_vlseg3ff_mask_nxv2f64(<vscale x 2 x double> %
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vlseg3e64ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg3ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -4958,7 +4714,6 @@ define <vscale x 2 x double> @test_vlseg4ff_nxv2f64(ptr %base, i64 %vl, ptr %out
 ; CHECK-NEXT:    vlseg4e64ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg4ff.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %vl)
@@ -4972,13 +4727,12 @@ define <vscale x 2 x double> @test_vlseg4ff_mask_nxv2f64(<vscale x 2 x double> %
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vlseg4e64ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>, i64} @llvm.riscv.vlseg4ff.mask.nxv2f64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -4998,7 +4752,6 @@ define <vscale x 4 x half> @test_vlseg2ff_nxv4f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg2ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -5016,7 +4769,6 @@ define <vscale x 4 x half> @test_vlseg2ff_mask_nxv4f16(<vscale x 4 x half> %val,
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -5036,7 +4788,6 @@ define <vscale x 4 x half> @test_vlseg3ff_nxv4f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg3ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -5050,12 +4801,11 @@ define <vscale x 4 x half> @test_vlseg3ff_mask_nxv4f16(<vscale x 4 x half> %val,
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg3ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -5075,7 +4825,6 @@ define <vscale x 4 x half> @test_vlseg4ff_nxv4f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg4ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -5089,13 +4838,12 @@ define <vscale x 4 x half> @test_vlseg4ff_mask_nxv4f16(<vscale x 4 x half> %val,
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg4ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -5115,7 +4863,6 @@ define <vscale x 4 x half> @test_vlseg5ff_nxv4f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg5ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -5129,14 +4876,13 @@ define <vscale x 4 x half> @test_vlseg5ff_mask_nxv4f16(<vscale x 4 x half> %val,
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg5ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -5156,7 +4902,6 @@ define <vscale x 4 x half> @test_vlseg6ff_nxv4f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg6ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -5170,15 +4915,14 @@ define <vscale x 4 x half> @test_vlseg6ff_mask_nxv4f16(<vscale x 4 x half> %val,
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg6ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -5198,7 +4942,6 @@ define <vscale x 4 x half> @test_vlseg7ff_nxv4f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg7ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -5212,16 +4955,15 @@ define <vscale x 4 x half> @test_vlseg7ff_mask_nxv4f16(<vscale x 4 x half> %val,
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg7ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -5241,7 +4983,6 @@ define <vscale x 4 x half> @test_vlseg8ff_nxv4f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg8ff.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %vl)
@@ -5255,17 +4996,16 @@ define <vscale x 4 x half> @test_vlseg8ff_mask_nxv4f16(<vscale x 4 x half> %val,
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>, i64} @llvm.riscv.vlseg8ff.mask.nxv4f16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -5285,7 +5025,6 @@ define <vscale x 2 x half> @test_vlseg2ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg2ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -5303,7 +5042,6 @@ define <vscale x 2 x half> @test_vlseg2ff_mask_nxv2f16(<vscale x 2 x half> %val,
 ; CHECK-NEXT:    vlseg2e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg2ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5323,7 +5061,6 @@ define <vscale x 2 x half> @test_vlseg3ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg3ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -5337,12 +5074,11 @@ define <vscale x 2 x half> @test_vlseg3ff_mask_nxv2f16(<vscale x 2 x half> %val,
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg3e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg3ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5362,7 +5098,6 @@ define <vscale x 2 x half> @test_vlseg4ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg4ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -5376,13 +5111,12 @@ define <vscale x 2 x half> @test_vlseg4ff_mask_nxv2f16(<vscale x 2 x half> %val,
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg4e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg4ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5402,7 +5136,6 @@ define <vscale x 2 x half> @test_vlseg5ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg5ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -5416,14 +5149,13 @@ define <vscale x 2 x half> @test_vlseg5ff_mask_nxv2f16(<vscale x 2 x half> %val,
 ; CHECK-LABEL: test_vlseg5ff_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg5e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg5ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5443,7 +5175,6 @@ define <vscale x 2 x half> @test_vlseg6ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg6ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -5457,15 +5188,14 @@ define <vscale x 2 x half> @test_vlseg6ff_mask_nxv2f16(<vscale x 2 x half> %val,
 ; CHECK-LABEL: test_vlseg6ff_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg6e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg6ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5485,7 +5215,6 @@ define <vscale x 2 x half> @test_vlseg7ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg7ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -5499,16 +5228,15 @@ define <vscale x 2 x half> @test_vlseg7ff_mask_nxv2f16(<vscale x 2 x half> %val,
 ; CHECK-LABEL: test_vlseg7ff_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg7e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg7ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5528,7 +5256,6 @@ define <vscale x 2 x half> @test_vlseg8ff_nxv2f16(ptr %base, i64 %vl, ptr %outvl
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg8ff.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %vl)
@@ -5542,17 +5269,16 @@ define <vscale x 2 x half> @test_vlseg8ff_mask_nxv2f16(<vscale x 2 x half> %val,
 ; CHECK-LABEL: test_vlseg8ff_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vlseg8e16ff.v v7, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>, i64} @llvm.riscv.vlseg8ff.mask.nxv2f16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5572,7 +5298,6 @@ define <vscale x 4 x float> @test_vlseg2ff_nxv4f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg2e32ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg2ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
@@ -5590,7 +5315,6 @@ define <vscale x 4 x float> @test_vlseg2ff_mask_nxv4f32(<vscale x 4 x float> %va
 ; CHECK-NEXT:    vlseg2e32ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg2ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -5610,7 +5334,6 @@ define <vscale x 4 x float> @test_vlseg3ff_nxv4f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg3e32ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg3ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
@@ -5624,12 +5347,11 @@ define <vscale x 4 x float> @test_vlseg3ff_mask_nxv4f32(<vscale x 4 x float> %va
 ; CHECK-LABEL: test_vlseg3ff_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vlseg3e32ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg3ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -5649,7 +5371,6 @@ define <vscale x 4 x float> @test_vlseg4ff_nxv4f32(ptr %base, i64 %vl, ptr %outv
 ; CHECK-NEXT:    vlseg4e32ff.v v6, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg4ff.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %vl)
@@ -5663,13 +5384,12 @@ define <vscale x 4 x float> @test_vlseg4ff_mask_nxv4f32(<vscale x 4 x float> %va
 ; CHECK-LABEL: test_vlseg4ff_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vlseg4e32ff.v v6, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a2)
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>, i64} @llvm.riscv.vlseg4ff.mask.nxv4f32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl, i64 1)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll
index 775f07e4a205a..2773c8c319eb9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv32.ll
@@ -10,7 +10,6 @@ define <vscale x 16 x i16> @test_vlsseg2_nxv16i16(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -25,7 +24,6 @@ define <vscale x 16 x i16> @test_vlsseg2_mask_nxv16i16(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -43,7 +41,6 @@ define <vscale x 1 x i8> @test_vlsseg2_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -58,7 +55,6 @@ define <vscale x 1 x i8> @test_vlsseg2_mask_nxv1i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -76,7 +72,6 @@ define <vscale x 1 x i8> @test_vlsseg3_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -92,7 +87,6 @@ define <vscale x 1 x i8> @test_vlsseg3_mask_nxv1i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -110,7 +104,6 @@ define <vscale x 1 x i8> @test_vlsseg4_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -127,7 +120,6 @@ define <vscale x 1 x i8> @test_vlsseg4_mask_nxv1i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -145,7 +137,6 @@ define <vscale x 1 x i8> @test_vlsseg5_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -163,7 +154,6 @@ define <vscale x 1 x i8> @test_vlsseg5_mask_nxv1i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -181,7 +171,6 @@ define <vscale x 1 x i8> @test_vlsseg6_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -200,7 +189,6 @@ define <vscale x 1 x i8> @test_vlsseg6_mask_nxv1i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -218,7 +206,6 @@ define <vscale x 1 x i8> @test_vlsseg7_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -238,7 +225,6 @@ define <vscale x 1 x i8> @test_vlsseg7_mask_nxv1i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -256,7 +242,6 @@ define <vscale x 1 x i8> @test_vlsseg8_nxv1i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -277,7 +262,6 @@ define <vscale x 1 x i8> @test_vlsseg8_mask_nxv1i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -295,7 +279,6 @@ define <vscale x 16 x i8> @test_vlsseg2_nxv16i8(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -310,7 +293,6 @@ define <vscale x 16 x i8> @test_vlsseg2_mask_nxv16i8(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -328,7 +310,6 @@ define <vscale x 16 x i8> @test_vlsseg3_nxv16i8(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -344,7 +325,6 @@ define <vscale x 16 x i8> @test_vlsseg3_mask_nxv16i8(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -362,7 +342,6 @@ define <vscale x 16 x i8> @test_vlsseg4_nxv16i8(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -379,7 +358,6 @@ define <vscale x 16 x i8> @test_vlsseg4_mask_nxv16i8(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -397,7 +375,6 @@ define <vscale x 2 x i32> @test_vlsseg2_nxv2i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -412,7 +389,6 @@ define <vscale x 2 x i32> @test_vlsseg2_mask_nxv2i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -430,7 +406,6 @@ define <vscale x 2 x i32> @test_vlsseg3_nxv2i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -446,7 +421,6 @@ define <vscale x 2 x i32> @test_vlsseg3_mask_nxv2i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -464,7 +438,6 @@ define <vscale x 2 x i32> @test_vlsseg4_nxv2i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -481,7 +454,6 @@ define <vscale x 2 x i32> @test_vlsseg4_mask_nxv2i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -499,7 +471,6 @@ define <vscale x 2 x i32> @test_vlsseg5_nxv2i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -517,7 +488,6 @@ define <vscale x 2 x i32> @test_vlsseg5_mask_nxv2i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -535,7 +505,6 @@ define <vscale x 2 x i32> @test_vlsseg6_nxv2i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -554,7 +523,6 @@ define <vscale x 2 x i32> @test_vlsseg6_mask_nxv2i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -572,7 +540,6 @@ define <vscale x 2 x i32> @test_vlsseg7_nxv2i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -592,7 +559,6 @@ define <vscale x 2 x i32> @test_vlsseg7_mask_nxv2i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -610,7 +576,6 @@ define <vscale x 2 x i32> @test_vlsseg8_nxv2i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -631,7 +596,6 @@ define <vscale x 2 x i32> @test_vlsseg8_mask_nxv2i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -649,7 +613,6 @@ define <vscale x 4 x i16> @test_vlsseg2_nxv4i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -664,7 +627,6 @@ define <vscale x 4 x i16> @test_vlsseg2_mask_nxv4i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -682,7 +644,6 @@ define <vscale x 4 x i16> @test_vlsseg3_nxv4i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -698,7 +659,6 @@ define <vscale x 4 x i16> @test_vlsseg3_mask_nxv4i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -716,7 +676,6 @@ define <vscale x 4 x i16> @test_vlsseg4_nxv4i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -733,7 +692,6 @@ define <vscale x 4 x i16> @test_vlsseg4_mask_nxv4i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -751,7 +709,6 @@ define <vscale x 4 x i16> @test_vlsseg5_nxv4i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -769,7 +726,6 @@ define <vscale x 4 x i16> @test_vlsseg5_mask_nxv4i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -787,7 +743,6 @@ define <vscale x 4 x i16> @test_vlsseg6_nxv4i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -806,7 +761,6 @@ define <vscale x 4 x i16> @test_vlsseg6_mask_nxv4i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -824,7 +778,6 @@ define <vscale x 4 x i16> @test_vlsseg7_nxv4i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -844,7 +797,6 @@ define <vscale x 4 x i16> @test_vlsseg7_mask_nxv4i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -862,7 +814,6 @@ define <vscale x 4 x i16> @test_vlsseg8_nxv4i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -883,7 +834,6 @@ define <vscale x 4 x i16> @test_vlsseg8_mask_nxv4i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -901,7 +851,6 @@ define <vscale x 1 x i32> @test_vlsseg2_nxv1i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -916,7 +865,6 @@ define <vscale x 1 x i32> @test_vlsseg2_mask_nxv1i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -934,7 +882,6 @@ define <vscale x 1 x i32> @test_vlsseg3_nxv1i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -950,7 +897,6 @@ define <vscale x 1 x i32> @test_vlsseg3_mask_nxv1i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -968,7 +914,6 @@ define <vscale x 1 x i32> @test_vlsseg4_nxv1i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -985,7 +930,6 @@ define <vscale x 1 x i32> @test_vlsseg4_mask_nxv1i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1003,7 +947,6 @@ define <vscale x 1 x i32> @test_vlsseg5_nxv1i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1021,7 +964,6 @@ define <vscale x 1 x i32> @test_vlsseg5_mask_nxv1i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1039,7 +981,6 @@ define <vscale x 1 x i32> @test_vlsseg6_nxv1i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1058,7 +999,6 @@ define <vscale x 1 x i32> @test_vlsseg6_mask_nxv1i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1076,7 +1016,6 @@ define <vscale x 1 x i32> @test_vlsseg7_nxv1i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1096,7 +1035,6 @@ define <vscale x 1 x i32> @test_vlsseg7_mask_nxv1i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1114,7 +1052,6 @@ define <vscale x 1 x i32> @test_vlsseg8_nxv1i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1135,7 +1072,6 @@ define <vscale x 1 x i32> @test_vlsseg8_mask_nxv1i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1153,7 +1089,6 @@ define <vscale x 8 x i16> @test_vlsseg2_nxv8i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1168,7 +1103,6 @@ define <vscale x 8 x i16> @test_vlsseg2_mask_nxv8i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1186,7 +1120,6 @@ define <vscale x 8 x i16> @test_vlsseg3_nxv8i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1202,7 +1135,6 @@ define <vscale x 8 x i16> @test_vlsseg3_mask_nxv8i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1220,7 +1152,6 @@ define <vscale x 8 x i16> @test_vlsseg4_nxv8i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1237,7 +1168,6 @@ define <vscale x 8 x i16> @test_vlsseg4_mask_nxv8i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlsseg4e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1255,7 +1185,6 @@ define <vscale x 8 x i8> @test_vlsseg2_nxv8i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1270,7 +1199,6 @@ define <vscale x 8 x i8> @test_vlsseg2_mask_nxv8i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1288,7 +1216,6 @@ define <vscale x 8 x i8> @test_vlsseg3_nxv8i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1304,7 +1231,6 @@ define <vscale x 8 x i8> @test_vlsseg3_mask_nxv8i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1322,7 +1248,6 @@ define <vscale x 8 x i8> @test_vlsseg4_nxv8i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1339,7 +1264,6 @@ define <vscale x 8 x i8> @test_vlsseg4_mask_nxv8i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1357,7 +1281,6 @@ define <vscale x 8 x i8> @test_vlsseg5_nxv8i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1375,7 +1298,6 @@ define <vscale x 8 x i8> @test_vlsseg5_mask_nxv8i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1393,7 +1315,6 @@ define <vscale x 8 x i8> @test_vlsseg6_nxv8i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1412,7 +1333,6 @@ define <vscale x 8 x i8> @test_vlsseg6_mask_nxv8i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1430,7 +1350,6 @@ define <vscale x 8 x i8> @test_vlsseg7_nxv8i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1450,7 +1369,6 @@ define <vscale x 8 x i8> @test_vlsseg7_mask_nxv8i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1468,7 +1386,6 @@ define <vscale x 8 x i8> @test_vlsseg8_nxv8i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1489,7 +1406,6 @@ define <vscale x 8 x i8> @test_vlsseg8_mask_nxv8i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1507,7 +1423,6 @@ define <vscale x 8 x i32> @test_vlsseg2_nxv8i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1522,7 +1437,6 @@ define <vscale x 8 x i32> @test_vlsseg2_mask_nxv8i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1540,7 +1454,6 @@ define <vscale x 4 x i8> @test_vlsseg2_nxv4i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1555,7 +1468,6 @@ define <vscale x 4 x i8> @test_vlsseg2_mask_nxv4i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1573,7 +1485,6 @@ define <vscale x 4 x i8> @test_vlsseg3_nxv4i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1589,7 +1500,6 @@ define <vscale x 4 x i8> @test_vlsseg3_mask_nxv4i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1607,7 +1517,6 @@ define <vscale x 4 x i8> @test_vlsseg4_nxv4i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1624,7 +1533,6 @@ define <vscale x 4 x i8> @test_vlsseg4_mask_nxv4i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1642,7 +1550,6 @@ define <vscale x 4 x i8> @test_vlsseg5_nxv4i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1660,7 +1567,6 @@ define <vscale x 4 x i8> @test_vlsseg5_mask_nxv4i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1678,7 +1584,6 @@ define <vscale x 4 x i8> @test_vlsseg6_nxv4i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1697,7 +1602,6 @@ define <vscale x 4 x i8> @test_vlsseg6_mask_nxv4i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1715,7 +1619,6 @@ define <vscale x 4 x i8> @test_vlsseg7_nxv4i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1735,7 +1638,6 @@ define <vscale x 4 x i8> @test_vlsseg7_mask_nxv4i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1753,7 +1655,6 @@ define <vscale x 4 x i8> @test_vlsseg8_nxv4i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1774,7 +1675,6 @@ define <vscale x 4 x i8> @test_vlsseg8_mask_nxv4i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1792,7 +1692,6 @@ define <vscale x 1 x i16> @test_vlsseg2_nxv1i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1807,7 +1706,6 @@ define <vscale x 1 x i16> @test_vlsseg2_mask_nxv1i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1825,7 +1723,6 @@ define <vscale x 1 x i16> @test_vlsseg3_nxv1i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1841,7 +1738,6 @@ define <vscale x 1 x i16> @test_vlsseg3_mask_nxv1i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1859,7 +1755,6 @@ define <vscale x 1 x i16> @test_vlsseg4_nxv1i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1876,7 +1771,6 @@ define <vscale x 1 x i16> @test_vlsseg4_mask_nxv1i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1894,7 +1788,6 @@ define <vscale x 1 x i16> @test_vlsseg5_nxv1i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1912,7 +1805,6 @@ define <vscale x 1 x i16> @test_vlsseg5_mask_nxv1i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1930,7 +1822,6 @@ define <vscale x 1 x i16> @test_vlsseg6_nxv1i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1949,7 +1840,6 @@ define <vscale x 1 x i16> @test_vlsseg6_mask_nxv1i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1967,7 +1857,6 @@ define <vscale x 1 x i16> @test_vlsseg7_nxv1i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -1987,7 +1876,6 @@ define <vscale x 1 x i16> @test_vlsseg7_mask_nxv1i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2005,7 +1893,6 @@ define <vscale x 1 x i16> @test_vlsseg8_nxv1i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2026,7 +1913,6 @@ define <vscale x 1 x i16> @test_vlsseg8_mask_nxv1i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2044,7 +1930,6 @@ define <vscale x 32 x i8> @test_vlsseg2_nxv32i8(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT:    vlsseg2e8.v v4, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2059,7 +1944,6 @@ define <vscale x 32 x i8> @test_vlsseg2_mask_nxv32i8(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vlsseg2e8.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlsseg2e8.v v4, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2077,7 +1961,6 @@ define <vscale x 2 x i8> @test_vlsseg2_nxv2i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2092,7 +1975,6 @@ define <vscale x 2 x i8> @test_vlsseg2_mask_nxv2i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2110,7 +1992,6 @@ define <vscale x 2 x i8> @test_vlsseg3_nxv2i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2126,7 +2007,6 @@ define <vscale x 2 x i8> @test_vlsseg3_mask_nxv2i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2144,7 +2024,6 @@ define <vscale x 2 x i8> @test_vlsseg4_nxv2i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2161,7 +2040,6 @@ define <vscale x 2 x i8> @test_vlsseg4_mask_nxv2i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2179,7 +2057,6 @@ define <vscale x 2 x i8> @test_vlsseg5_nxv2i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2197,7 +2074,6 @@ define <vscale x 2 x i8> @test_vlsseg5_mask_nxv2i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2215,7 +2091,6 @@ define <vscale x 2 x i8> @test_vlsseg6_nxv2i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2234,7 +2109,6 @@ define <vscale x 2 x i8> @test_vlsseg6_mask_nxv2i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2252,7 +2126,6 @@ define <vscale x 2 x i8> @test_vlsseg7_nxv2i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2272,7 +2145,6 @@ define <vscale x 2 x i8> @test_vlsseg7_mask_nxv2i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2290,7 +2162,6 @@ define <vscale x 2 x i8> @test_vlsseg8_nxv2i8(ptr %base, i32 %offset, i32 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2311,7 +2182,6 @@ define <vscale x 2 x i8> @test_vlsseg8_mask_nxv2i8(ptr %base, i32 %offset, i32 %
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2329,7 +2199,6 @@ define <vscale x 2 x i16> @test_vlsseg2_nxv2i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2344,7 +2213,6 @@ define <vscale x 2 x i16> @test_vlsseg2_mask_nxv2i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2362,7 +2230,6 @@ define <vscale x 2 x i16> @test_vlsseg3_nxv2i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2378,7 +2245,6 @@ define <vscale x 2 x i16> @test_vlsseg3_mask_nxv2i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2396,7 +2262,6 @@ define <vscale x 2 x i16> @test_vlsseg4_nxv2i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2413,7 +2278,6 @@ define <vscale x 2 x i16> @test_vlsseg4_mask_nxv2i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2431,7 +2295,6 @@ define <vscale x 2 x i16> @test_vlsseg5_nxv2i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2449,7 +2312,6 @@ define <vscale x 2 x i16> @test_vlsseg5_mask_nxv2i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2467,7 +2329,6 @@ define <vscale x 2 x i16> @test_vlsseg6_nxv2i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2486,7 +2347,6 @@ define <vscale x 2 x i16> @test_vlsseg6_mask_nxv2i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2504,7 +2364,6 @@ define <vscale x 2 x i16> @test_vlsseg7_nxv2i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2524,7 +2383,6 @@ define <vscale x 2 x i16> @test_vlsseg7_mask_nxv2i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2542,7 +2400,6 @@ define <vscale x 2 x i16> @test_vlsseg8_nxv2i16(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2563,7 +2420,6 @@ define <vscale x 2 x i16> @test_vlsseg8_mask_nxv2i16(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2581,7 +2437,6 @@ define <vscale x 4 x i32> @test_vlsseg2_nxv4i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2596,7 +2451,6 @@ define <vscale x 4 x i32> @test_vlsseg2_mask_nxv4i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2614,7 +2468,6 @@ define <vscale x 4 x i32> @test_vlsseg3_nxv4i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2630,7 +2483,6 @@ define <vscale x 4 x i32> @test_vlsseg3_mask_nxv4i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2648,7 +2500,6 @@ define <vscale x 4 x i32> @test_vlsseg4_nxv4i32(ptr %base, i32 %offset, i32 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2665,7 +2516,6 @@ define <vscale x 4 x i32> @test_vlsseg4_mask_nxv4i32(ptr %base, i32 %offset, i32
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2683,7 +2533,6 @@ define <vscale x 16 x half> @test_vlsseg2_nxv16f16(ptr %base, i32 %offset, i32 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2698,7 +2547,6 @@ define <vscale x 16 x half> @test_vlsseg2_mask_nxv16f16(ptr %base, i32 %offset,
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2716,7 +2564,6 @@ define <vscale x 4 x double> @test_vlsseg2_nxv4f64(ptr %base, i32 %offset, i32 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2731,7 +2578,6 @@ define <vscale x 4 x double> @test_vlsseg2_mask_nxv4f64(ptr %base, i32 %offset,
 ; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2749,7 +2595,6 @@ define <vscale x 1 x double> @test_vlsseg2_nxv1f64(ptr %base, i32 %offset, i32 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2764,7 +2609,6 @@ define <vscale x 1 x double> @test_vlsseg2_mask_nxv1f64(ptr %base, i32 %offset,
 ; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2782,7 +2626,6 @@ define <vscale x 1 x double> @test_vlsseg3_nxv1f64(ptr %base, i32 %offset, i32 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2798,7 +2641,6 @@ define <vscale x 1 x double> @test_vlsseg3_mask_nxv1f64(ptr %base, i32 %offset,
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2816,7 +2658,6 @@ define <vscale x 1 x double> @test_vlsseg4_nxv1f64(ptr %base, i32 %offset, i32 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2833,7 +2674,6 @@ define <vscale x 1 x double> @test_vlsseg4_mask_nxv1f64(ptr %base, i32 %offset,
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2851,7 +2691,6 @@ define <vscale x 1 x double> @test_vlsseg5_nxv1f64(ptr %base, i32 %offset, i32 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2869,7 +2708,6 @@ define <vscale x 1 x double> @test_vlsseg5_mask_nxv1f64(ptr %base, i32 %offset,
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2887,7 +2725,6 @@ define <vscale x 1 x double> @test_vlsseg6_nxv1f64(ptr %base, i32 %offset, i32 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2906,7 +2743,6 @@ define <vscale x 1 x double> @test_vlsseg6_mask_nxv1f64(ptr %base, i32 %offset,
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2924,7 +2760,6 @@ define <vscale x 1 x double> @test_vlsseg7_nxv1f64(ptr %base, i32 %offset, i32 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2944,7 +2779,6 @@ define <vscale x 1 x double> @test_vlsseg7_mask_nxv1f64(ptr %base, i32 %offset,
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2962,7 +2796,6 @@ define <vscale x 1 x double> @test_vlsseg8_nxv1f64(ptr %base, i32 %offset, i32 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -2983,7 +2816,6 @@ define <vscale x 1 x double> @test_vlsseg8_mask_nxv1f64(ptr %base, i32 %offset,
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3001,7 +2833,6 @@ define <vscale x 2 x float> @test_vlsseg2_nxv2f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3016,7 +2847,6 @@ define <vscale x 2 x float> @test_vlsseg2_mask_nxv2f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3034,7 +2864,6 @@ define <vscale x 2 x float> @test_vlsseg3_nxv2f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3050,7 +2879,6 @@ define <vscale x 2 x float> @test_vlsseg3_mask_nxv2f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3068,7 +2896,6 @@ define <vscale x 2 x float> @test_vlsseg4_nxv2f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3085,7 +2912,6 @@ define <vscale x 2 x float> @test_vlsseg4_mask_nxv2f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3103,7 +2929,6 @@ define <vscale x 2 x float> @test_vlsseg5_nxv2f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3121,7 +2946,6 @@ define <vscale x 2 x float> @test_vlsseg5_mask_nxv2f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3139,7 +2963,6 @@ define <vscale x 2 x float> @test_vlsseg6_nxv2f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3158,7 +2981,6 @@ define <vscale x 2 x float> @test_vlsseg6_mask_nxv2f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3176,7 +2998,6 @@ define <vscale x 2 x float> @test_vlsseg7_nxv2f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3196,7 +3017,6 @@ define <vscale x 2 x float> @test_vlsseg7_mask_nxv2f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3214,7 +3034,6 @@ define <vscale x 2 x float> @test_vlsseg8_nxv2f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3235,7 +3054,6 @@ define <vscale x 2 x float> @test_vlsseg8_mask_nxv2f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3253,7 +3071,6 @@ define <vscale x 1 x half> @test_vlsseg2_nxv1f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3268,7 +3085,6 @@ define <vscale x 1 x half> @test_vlsseg2_mask_nxv1f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3286,7 +3102,6 @@ define <vscale x 1 x half> @test_vlsseg3_nxv1f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3302,7 +3117,6 @@ define <vscale x 1 x half> @test_vlsseg3_mask_nxv1f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3320,7 +3134,6 @@ define <vscale x 1 x half> @test_vlsseg4_nxv1f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3337,7 +3150,6 @@ define <vscale x 1 x half> @test_vlsseg4_mask_nxv1f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3355,7 +3167,6 @@ define <vscale x 1 x half> @test_vlsseg5_nxv1f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3373,7 +3184,6 @@ define <vscale x 1 x half> @test_vlsseg5_mask_nxv1f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3391,7 +3201,6 @@ define <vscale x 1 x half> @test_vlsseg6_nxv1f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3410,7 +3219,6 @@ define <vscale x 1 x half> @test_vlsseg6_mask_nxv1f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3428,7 +3236,6 @@ define <vscale x 1 x half> @test_vlsseg7_nxv1f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3448,7 +3255,6 @@ define <vscale x 1 x half> @test_vlsseg7_mask_nxv1f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3466,7 +3272,6 @@ define <vscale x 1 x half> @test_vlsseg8_nxv1f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3487,7 +3292,6 @@ define <vscale x 1 x half> @test_vlsseg8_mask_nxv1f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3505,7 +3309,6 @@ define <vscale x 1 x float> @test_vlsseg2_nxv1f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3520,7 +3323,6 @@ define <vscale x 1 x float> @test_vlsseg2_mask_nxv1f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3538,7 +3340,6 @@ define <vscale x 1 x float> @test_vlsseg3_nxv1f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3554,7 +3355,6 @@ define <vscale x 1 x float> @test_vlsseg3_mask_nxv1f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3572,7 +3372,6 @@ define <vscale x 1 x float> @test_vlsseg4_nxv1f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3589,7 +3388,6 @@ define <vscale x 1 x float> @test_vlsseg4_mask_nxv1f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3607,7 +3405,6 @@ define <vscale x 1 x float> @test_vlsseg5_nxv1f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3625,7 +3422,6 @@ define <vscale x 1 x float> @test_vlsseg5_mask_nxv1f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3643,7 +3439,6 @@ define <vscale x 1 x float> @test_vlsseg6_nxv1f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3662,7 +3457,6 @@ define <vscale x 1 x float> @test_vlsseg6_mask_nxv1f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3680,7 +3474,6 @@ define <vscale x 1 x float> @test_vlsseg7_nxv1f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3700,7 +3493,6 @@ define <vscale x 1 x float> @test_vlsseg7_mask_nxv1f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3718,7 +3510,6 @@ define <vscale x 1 x float> @test_vlsseg8_nxv1f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3739,7 +3530,6 @@ define <vscale x 1 x float> @test_vlsseg8_mask_nxv1f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3757,7 +3547,6 @@ define <vscale x 8 x half> @test_vlsseg2_nxv8f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3772,7 +3561,6 @@ define <vscale x 8 x half> @test_vlsseg2_mask_nxv8f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3790,7 +3578,6 @@ define <vscale x 8 x half> @test_vlsseg3_nxv8f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3806,7 +3593,6 @@ define <vscale x 8 x half> @test_vlsseg3_mask_nxv8f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3824,7 +3610,6 @@ define <vscale x 8 x half> @test_vlsseg4_nxv8f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3841,7 +3626,6 @@ define <vscale x 8 x half> @test_vlsseg4_mask_nxv8f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlsseg4e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3859,7 +3643,6 @@ define <vscale x 8 x float> @test_vlsseg2_nxv8f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3874,7 +3657,6 @@ define <vscale x 8 x float> @test_vlsseg2_mask_nxv8f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3892,7 +3674,6 @@ define <vscale x 2 x double> @test_vlsseg2_nxv2f64(ptr %base, i32 %offset, i32 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3907,7 +3688,6 @@ define <vscale x 2 x double> @test_vlsseg2_mask_nxv2f64(ptr %base, i32 %offset,
 ; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3925,7 +3705,6 @@ define <vscale x 2 x double> @test_vlsseg3_nxv2f64(ptr %base, i32 %offset, i32 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vlsseg3e64.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3941,7 +3720,6 @@ define <vscale x 2 x double> @test_vlsseg3_mask_nxv2f64(ptr %base, i32 %offset,
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlsseg3e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3959,7 +3737,6 @@ define <vscale x 2 x double> @test_vlsseg4_nxv2f64(ptr %base, i32 %offset, i32 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vlsseg4e64.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3976,7 +3753,6 @@ define <vscale x 2 x double> @test_vlsseg4_mask_nxv2f64(ptr %base, i32 %offset,
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlsseg4e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i32 %offset, i32 %vl)
@@ -3994,7 +3770,6 @@ define <vscale x 4 x half> @test_vlsseg2_nxv4f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4009,7 +3784,6 @@ define <vscale x 4 x half> @test_vlsseg2_mask_nxv4f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4027,7 +3801,6 @@ define <vscale x 4 x half> @test_vlsseg3_nxv4f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4043,7 +3816,6 @@ define <vscale x 4 x half> @test_vlsseg3_mask_nxv4f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4061,7 +3833,6 @@ define <vscale x 4 x half> @test_vlsseg4_nxv4f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4078,7 +3849,6 @@ define <vscale x 4 x half> @test_vlsseg4_mask_nxv4f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4096,7 +3866,6 @@ define <vscale x 4 x half> @test_vlsseg5_nxv4f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4114,7 +3883,6 @@ define <vscale x 4 x half> @test_vlsseg5_mask_nxv4f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4132,7 +3900,6 @@ define <vscale x 4 x half> @test_vlsseg6_nxv4f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4151,7 +3918,6 @@ define <vscale x 4 x half> @test_vlsseg6_mask_nxv4f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4169,7 +3935,6 @@ define <vscale x 4 x half> @test_vlsseg7_nxv4f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4189,7 +3954,6 @@ define <vscale x 4 x half> @test_vlsseg7_mask_nxv4f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4207,7 +3971,6 @@ define <vscale x 4 x half> @test_vlsseg8_nxv4f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4228,7 +3991,6 @@ define <vscale x 4 x half> @test_vlsseg8_mask_nxv4f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4246,7 +4008,6 @@ define <vscale x 2 x half> @test_vlsseg2_nxv2f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4261,7 +4022,6 @@ define <vscale x 2 x half> @test_vlsseg2_mask_nxv2f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4279,7 +4039,6 @@ define <vscale x 2 x half> @test_vlsseg3_nxv2f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4295,7 +4054,6 @@ define <vscale x 2 x half> @test_vlsseg3_mask_nxv2f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4313,7 +4071,6 @@ define <vscale x 2 x half> @test_vlsseg4_nxv2f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4330,7 +4087,6 @@ define <vscale x 2 x half> @test_vlsseg4_mask_nxv2f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4348,7 +4104,6 @@ define <vscale x 2 x half> @test_vlsseg5_nxv2f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4366,7 +4121,6 @@ define <vscale x 2 x half> @test_vlsseg5_mask_nxv2f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4384,7 +4138,6 @@ define <vscale x 2 x half> @test_vlsseg6_nxv2f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4403,7 +4156,6 @@ define <vscale x 2 x half> @test_vlsseg6_mask_nxv2f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4421,7 +4173,6 @@ define <vscale x 2 x half> @test_vlsseg7_nxv2f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4441,7 +4192,6 @@ define <vscale x 2 x half> @test_vlsseg7_mask_nxv2f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4459,7 +4209,6 @@ define <vscale x 2 x half> @test_vlsseg8_nxv2f16(ptr %base, i32 %offset, i32 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4480,7 +4229,6 @@ define <vscale x 2 x half> @test_vlsseg8_mask_nxv2f16(ptr %base, i32 %offset, i3
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4498,7 +4246,6 @@ define <vscale x 4 x float> @test_vlsseg2_nxv4f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4513,7 +4260,6 @@ define <vscale x 4 x float> @test_vlsseg2_mask_nxv4f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4531,7 +4277,6 @@ define <vscale x 4 x float> @test_vlsseg3_nxv4f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4547,7 +4292,6 @@ define <vscale x 4 x float> @test_vlsseg3_mask_nxv4f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4565,7 +4309,6 @@ define <vscale x 4 x float> @test_vlsseg4_nxv4f32(ptr %base, i32 %offset, i32 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
@@ -4582,7 +4325,6 @@ define <vscale x 4 x float> @test_vlsseg4_mask_nxv4f32(ptr %base, i32 %offset, i
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i32 %offset, i32 %vl)
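
(Aside for readers skimming these hunks, not part of the patch: every deleted line above is a late "# kill" marker printed by the register allocator, not a real instruction. Below is a minimal self-contained sketch of the pattern all of these tests exercise, written against the same vlsseg2 intrinsic shape visible in the call sites above; the function name @sketch_vlsseg2 is made up for illustration. Returning field 1 of the segment-load aggregate leaves the result in v8 while the tuple itself starts at v7, so without subregister liveness the allocator emitted "# kill: def $v8 killed $v8 killed $v7_v8" to carve $v8 out of the tuple. With subregister liveness enabled, the tuple's lanes are tracked individually and the marker disappears, while the vsetvli/vlsseg sequence itself is unchanged.)

declare {<vscale x 1 x float>, <vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float>, <vscale x 1 x float>, ptr, i32, i32)

; Strided two-field segment load; returning field 1 of the aggregate
; places the result in v8 while the loaded tuple occupies v7_v8.
define <vscale x 1 x float> @sketch_vlsseg2(ptr %base, i32 %offset, i32 %vl) {
entry:
  %0 = tail call {<vscale x 1 x float>, <vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i32 %offset, i32 %vl)
  %1 = extractvalue {<vscale x 1 x float>, <vscale x 1 x float>} %0, 1
  ret <vscale x 1 x float> %1
}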

diff --git a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll
index e621cbc802864..7d78be45e165e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vlsseg-rv64.ll
@@ -10,7 +10,6 @@ define <vscale x 16 x i16> @test_vlsseg2_nxv16i16(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -25,7 +24,6 @@ define <vscale x 16 x i16> @test_vlsseg2_mask_nxv16i16(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -43,7 +41,6 @@ define <vscale x 4 x i32> @test_vlsseg2_nxv4i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -58,7 +55,6 @@ define <vscale x 4 x i32> @test_vlsseg2_mask_nxv4i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg2.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -76,7 +72,6 @@ define <vscale x 4 x i32> @test_vlsseg3_nxv4i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -92,7 +87,6 @@ define <vscale x 4 x i32> @test_vlsseg3_mask_nxv4i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg3.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -110,7 +104,6 @@ define <vscale x 4 x i32> @test_vlsseg4_nxv4i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -127,7 +120,6 @@ define <vscale x 4 x i32> @test_vlsseg4_mask_nxv4i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vlsseg4.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, <vscale x 4 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -145,7 +137,6 @@ define <vscale x 16 x i8> @test_vlsseg2_nxv16i8(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -160,7 +151,6 @@ define <vscale x 16 x i8> @test_vlsseg2_mask_nxv16i8(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlsseg2e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg2.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -178,7 +168,6 @@ define <vscale x 16 x i8> @test_vlsseg3_nxv16i8(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -194,7 +183,6 @@ define <vscale x 16 x i8> @test_vlsseg3_mask_nxv16i8(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlsseg3e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg3.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -212,7 +200,6 @@ define <vscale x 16 x i8> @test_vlsseg4_nxv16i8(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -229,7 +216,6 @@ define <vscale x 16 x i8> @test_vlsseg4_mask_nxv16i8(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlsseg4e8.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vlsseg4.nxv16i8(<vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, <vscale x 16 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -247,7 +233,6 @@ define <vscale x 1 x i64> @test_vlsseg2_nxv1i64(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -262,7 +247,6 @@ define <vscale x 1 x i64> @test_vlsseg2_mask_nxv1i64(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg2.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -280,7 +264,6 @@ define <vscale x 1 x i64> @test_vlsseg3_nxv1i64(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -296,7 +279,6 @@ define <vscale x 1 x i64> @test_vlsseg3_mask_nxv1i64(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg3.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -314,7 +296,6 @@ define <vscale x 1 x i64> @test_vlsseg4_nxv1i64(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -331,7 +312,6 @@ define <vscale x 1 x i64> @test_vlsseg4_mask_nxv1i64(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg4.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -349,7 +329,6 @@ define <vscale x 1 x i64> @test_vlsseg5_nxv1i64(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -367,7 +346,6 @@ define <vscale x 1 x i64> @test_vlsseg5_mask_nxv1i64(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg5.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -385,7 +363,6 @@ define <vscale x 1 x i64> @test_vlsseg6_nxv1i64(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -404,7 +381,6 @@ define <vscale x 1 x i64> @test_vlsseg6_mask_nxv1i64(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg6.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -422,7 +398,6 @@ define <vscale x 1 x i64> @test_vlsseg7_nxv1i64(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -442,7 +417,6 @@ define <vscale x 1 x i64> @test_vlsseg7_mask_nxv1i64(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg7.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -460,7 +434,6 @@ define <vscale x 1 x i64> @test_vlsseg8_nxv1i64(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -481,7 +454,6 @@ define <vscale x 1 x i64> @test_vlsseg8_mask_nxv1i64(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vlsseg8.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef ,<vscale x 1 x i64> undef, <vscale x 1 x i64> undef, <vscale x 1 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -499,7 +471,6 @@ define <vscale x 1 x i32> @test_vlsseg2_nxv1i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -514,7 +485,6 @@ define <vscale x 1 x i32> @test_vlsseg2_mask_nxv1i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg2.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -532,7 +502,6 @@ define <vscale x 1 x i32> @test_vlsseg3_nxv1i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -548,7 +517,6 @@ define <vscale x 1 x i32> @test_vlsseg3_mask_nxv1i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg3.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -566,7 +534,6 @@ define <vscale x 1 x i32> @test_vlsseg4_nxv1i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -583,7 +550,6 @@ define <vscale x 1 x i32> @test_vlsseg4_mask_nxv1i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg4.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -601,7 +567,6 @@ define <vscale x 1 x i32> @test_vlsseg5_nxv1i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -619,7 +584,6 @@ define <vscale x 1 x i32> @test_vlsseg5_mask_nxv1i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg5.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -637,7 +601,6 @@ define <vscale x 1 x i32> @test_vlsseg6_nxv1i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -656,7 +619,6 @@ define <vscale x 1 x i32> @test_vlsseg6_mask_nxv1i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg6.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -674,7 +636,6 @@ define <vscale x 1 x i32> @test_vlsseg7_nxv1i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -694,7 +655,6 @@ define <vscale x 1 x i32> @test_vlsseg7_mask_nxv1i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg7.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -712,7 +672,6 @@ define <vscale x 1 x i32> @test_vlsseg8_nxv1i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -733,7 +692,6 @@ define <vscale x 1 x i32> @test_vlsseg8_mask_nxv1i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vlsseg8.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef ,<vscale x 1 x i32> undef, <vscale x 1 x i32> undef, <vscale x 1 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -751,7 +709,6 @@ define <vscale x 8 x i16> @test_vlsseg2_nxv8i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -766,7 +723,6 @@ define <vscale x 8 x i16> @test_vlsseg2_mask_nxv8i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg2.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -784,7 +740,6 @@ define <vscale x 8 x i16> @test_vlsseg3_nxv8i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -800,7 +755,6 @@ define <vscale x 8 x i16> @test_vlsseg3_mask_nxv8i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg3.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -818,7 +772,6 @@ define <vscale x 8 x i16> @test_vlsseg4_nxv8i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -835,7 +788,6 @@ define <vscale x 8 x i16> @test_vlsseg4_mask_nxv8i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlsseg4e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vlsseg4.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, <vscale x 8 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -853,7 +805,6 @@ define <vscale x 4 x i8> @test_vlsseg2_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -868,7 +819,6 @@ define <vscale x 4 x i8> @test_vlsseg2_mask_nxv4i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg2.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -886,7 +836,6 @@ define <vscale x 4 x i8> @test_vlsseg3_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -902,7 +851,6 @@ define <vscale x 4 x i8> @test_vlsseg3_mask_nxv4i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg3.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -920,7 +868,6 @@ define <vscale x 4 x i8> @test_vlsseg4_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -937,7 +884,6 @@ define <vscale x 4 x i8> @test_vlsseg4_mask_nxv4i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg4.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -955,7 +901,6 @@ define <vscale x 4 x i8> @test_vlsseg5_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -973,7 +918,6 @@ define <vscale x 4 x i8> @test_vlsseg5_mask_nxv4i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg5.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -991,7 +935,6 @@ define <vscale x 4 x i8> @test_vlsseg6_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1010,7 +953,6 @@ define <vscale x 4 x i8> @test_vlsseg6_mask_nxv4i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg6.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1028,7 +970,6 @@ define <vscale x 4 x i8> @test_vlsseg7_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1048,7 +989,6 @@ define <vscale x 4 x i8> @test_vlsseg7_mask_nxv4i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg7.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1066,7 +1006,6 @@ define <vscale x 4 x i8> @test_vlsseg8_nxv4i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1087,7 +1026,6 @@ define <vscale x 4 x i8> @test_vlsseg8_mask_nxv4i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vlsseg8.nxv4i8(<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef ,<vscale x 4 x i8> undef, <vscale x 4 x i8> undef, <vscale x 4 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1105,7 +1043,6 @@ define <vscale x 1 x i16> @test_vlsseg2_nxv1i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1120,7 +1057,6 @@ define <vscale x 1 x i16> @test_vlsseg2_mask_nxv1i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg2.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1138,7 +1074,6 @@ define <vscale x 1 x i16> @test_vlsseg3_nxv1i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1154,7 +1089,6 @@ define <vscale x 1 x i16> @test_vlsseg3_mask_nxv1i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg3.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1172,7 +1106,6 @@ define <vscale x 1 x i16> @test_vlsseg4_nxv1i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1189,7 +1122,6 @@ define <vscale x 1 x i16> @test_vlsseg4_mask_nxv1i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg4.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1207,7 +1139,6 @@ define <vscale x 1 x i16> @test_vlsseg5_nxv1i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1225,7 +1156,6 @@ define <vscale x 1 x i16> @test_vlsseg5_mask_nxv1i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg5.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1243,7 +1173,6 @@ define <vscale x 1 x i16> @test_vlsseg6_nxv1i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1262,7 +1191,6 @@ define <vscale x 1 x i16> @test_vlsseg6_mask_nxv1i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg6.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1280,7 +1208,6 @@ define <vscale x 1 x i16> @test_vlsseg7_nxv1i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1300,7 +1227,6 @@ define <vscale x 1 x i16> @test_vlsseg7_mask_nxv1i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg7.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1318,7 +1244,6 @@ define <vscale x 1 x i16> @test_vlsseg8_nxv1i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1339,7 +1264,6 @@ define <vscale x 1 x i16> @test_vlsseg8_mask_nxv1i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vlsseg8.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef ,<vscale x 1 x i16> undef, <vscale x 1 x i16> undef, <vscale x 1 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1357,7 +1281,6 @@ define <vscale x 2 x i32> @test_vlsseg2_nxv2i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1372,7 +1295,6 @@ define <vscale x 2 x i32> @test_vlsseg2_mask_nxv2i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg2.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1390,7 +1312,6 @@ define <vscale x 2 x i32> @test_vlsseg3_nxv2i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1406,7 +1327,6 @@ define <vscale x 2 x i32> @test_vlsseg3_mask_nxv2i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg3.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1424,7 +1344,6 @@ define <vscale x 2 x i32> @test_vlsseg4_nxv2i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1441,7 +1360,6 @@ define <vscale x 2 x i32> @test_vlsseg4_mask_nxv2i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg4.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1459,7 +1377,6 @@ define <vscale x 2 x i32> @test_vlsseg5_nxv2i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1477,7 +1394,6 @@ define <vscale x 2 x i32> @test_vlsseg5_mask_nxv2i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg5.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1495,7 +1411,6 @@ define <vscale x 2 x i32> @test_vlsseg6_nxv2i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1514,7 +1429,6 @@ define <vscale x 2 x i32> @test_vlsseg6_mask_nxv2i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg6.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1532,7 +1446,6 @@ define <vscale x 2 x i32> @test_vlsseg7_nxv2i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1552,7 +1465,6 @@ define <vscale x 2 x i32> @test_vlsseg7_mask_nxv2i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg7.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1570,7 +1482,6 @@ define <vscale x 2 x i32> @test_vlsseg8_nxv2i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1591,7 +1502,6 @@ define <vscale x 2 x i32> @test_vlsseg8_mask_nxv2i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vlsseg8.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef ,<vscale x 2 x i32> undef, <vscale x 2 x i32> undef, <vscale x 2 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1609,7 +1519,6 @@ define <vscale x 8 x i8> @test_vlsseg2_nxv8i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1624,7 +1533,6 @@ define <vscale x 8 x i8> @test_vlsseg2_mask_nxv8i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg2.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1642,7 +1550,6 @@ define <vscale x 8 x i8> @test_vlsseg3_nxv8i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1658,7 +1565,6 @@ define <vscale x 8 x i8> @test_vlsseg3_mask_nxv8i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg3.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1676,7 +1582,6 @@ define <vscale x 8 x i8> @test_vlsseg4_nxv8i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1693,7 +1598,6 @@ define <vscale x 8 x i8> @test_vlsseg4_mask_nxv8i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg4.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1711,7 +1615,6 @@ define <vscale x 8 x i8> @test_vlsseg5_nxv8i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1729,7 +1632,6 @@ define <vscale x 8 x i8> @test_vlsseg5_mask_nxv8i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg5.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1747,7 +1649,6 @@ define <vscale x 8 x i8> @test_vlsseg6_nxv8i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1766,7 +1667,6 @@ define <vscale x 8 x i8> @test_vlsseg6_mask_nxv8i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg6.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1784,7 +1684,6 @@ define <vscale x 8 x i8> @test_vlsseg7_nxv8i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1804,7 +1703,6 @@ define <vscale x 8 x i8> @test_vlsseg7_mask_nxv8i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg7.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1822,7 +1720,6 @@ define <vscale x 8 x i8> @test_vlsseg8_nxv8i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1843,7 +1740,6 @@ define <vscale x 8 x i8> @test_vlsseg8_mask_nxv8i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vlsseg8.nxv8i8(<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef ,<vscale x 8 x i8> undef, <vscale x 8 x i8> undef, <vscale x 8 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1861,7 +1757,6 @@ define <vscale x 4 x i64> @test_vlsseg2_nxv4i64(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1876,7 +1771,6 @@ define <vscale x 4 x i64> @test_vlsseg2_mask_nxv4i64(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vlsseg2.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1894,7 +1788,6 @@ define <vscale x 4 x i16> @test_vlsseg2_nxv4i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1909,7 +1802,6 @@ define <vscale x 4 x i16> @test_vlsseg2_mask_nxv4i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg2.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1927,7 +1819,6 @@ define <vscale x 4 x i16> @test_vlsseg3_nxv4i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1943,7 +1834,6 @@ define <vscale x 4 x i16> @test_vlsseg3_mask_nxv4i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg3.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1961,7 +1851,6 @@ define <vscale x 4 x i16> @test_vlsseg4_nxv4i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1978,7 +1867,6 @@ define <vscale x 4 x i16> @test_vlsseg4_mask_nxv4i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg4.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -1996,7 +1884,6 @@ define <vscale x 4 x i16> @test_vlsseg5_nxv4i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2014,7 +1901,6 @@ define <vscale x 4 x i16> @test_vlsseg5_mask_nxv4i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg5.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2032,7 +1918,6 @@ define <vscale x 4 x i16> @test_vlsseg6_nxv4i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2051,7 +1936,6 @@ define <vscale x 4 x i16> @test_vlsseg6_mask_nxv4i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg6.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2069,7 +1953,6 @@ define <vscale x 4 x i16> @test_vlsseg7_nxv4i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2089,7 +1972,6 @@ define <vscale x 4 x i16> @test_vlsseg7_mask_nxv4i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg7.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2107,7 +1989,6 @@ define <vscale x 4 x i16> @test_vlsseg8_nxv4i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2128,7 +2009,6 @@ define <vscale x 4 x i16> @test_vlsseg8_mask_nxv4i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vlsseg8.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef ,<vscale x 4 x i16> undef, <vscale x 4 x i16> undef, <vscale x 4 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2146,7 +2026,6 @@ define <vscale x 1 x i8> @test_vlsseg2_nxv1i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2161,7 +2040,6 @@ define <vscale x 1 x i8> @test_vlsseg2_mask_nxv1i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg2.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2179,7 +2057,6 @@ define <vscale x 1 x i8> @test_vlsseg3_nxv1i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2195,7 +2072,6 @@ define <vscale x 1 x i8> @test_vlsseg3_mask_nxv1i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg3.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2213,7 +2089,6 @@ define <vscale x 1 x i8> @test_vlsseg4_nxv1i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2230,7 +2105,6 @@ define <vscale x 1 x i8> @test_vlsseg4_mask_nxv1i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg4.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2248,7 +2122,6 @@ define <vscale x 1 x i8> @test_vlsseg5_nxv1i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2266,7 +2139,6 @@ define <vscale x 1 x i8> @test_vlsseg5_mask_nxv1i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg5.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2284,7 +2156,6 @@ define <vscale x 1 x i8> @test_vlsseg6_nxv1i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2303,7 +2174,6 @@ define <vscale x 1 x i8> @test_vlsseg6_mask_nxv1i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg6.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2321,7 +2191,6 @@ define <vscale x 1 x i8> @test_vlsseg7_nxv1i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2341,7 +2210,6 @@ define <vscale x 1 x i8> @test_vlsseg7_mask_nxv1i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg7.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2359,7 +2227,6 @@ define <vscale x 1 x i8> @test_vlsseg8_nxv1i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2380,7 +2247,6 @@ define <vscale x 1 x i8> @test_vlsseg8_mask_nxv1i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vlsseg8.nxv1i8(<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef ,<vscale x 1 x i8> undef, <vscale x 1 x i8> undef, <vscale x 1 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2398,7 +2264,6 @@ define <vscale x 2 x i8> @test_vlsseg2_nxv2i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2413,7 +2278,6 @@ define <vscale x 2 x i8> @test_vlsseg2_mask_nxv2i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg2.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2431,7 +2295,6 @@ define <vscale x 2 x i8> @test_vlsseg3_nxv2i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2447,7 +2310,6 @@ define <vscale x 2 x i8> @test_vlsseg3_mask_nxv2i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg3.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2465,7 +2327,6 @@ define <vscale x 2 x i8> @test_vlsseg4_nxv2i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2482,7 +2343,6 @@ define <vscale x 2 x i8> @test_vlsseg4_mask_nxv2i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg4.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2500,7 +2360,6 @@ define <vscale x 2 x i8> @test_vlsseg5_nxv2i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2518,7 +2377,6 @@ define <vscale x 2 x i8> @test_vlsseg5_mask_nxv2i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg5.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2536,7 +2394,6 @@ define <vscale x 2 x i8> @test_vlsseg6_nxv2i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2555,7 +2412,6 @@ define <vscale x 2 x i8> @test_vlsseg6_mask_nxv2i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg6.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2573,7 +2429,6 @@ define <vscale x 2 x i8> @test_vlsseg7_nxv2i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2593,7 +2448,6 @@ define <vscale x 2 x i8> @test_vlsseg7_mask_nxv2i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg7.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2611,7 +2465,6 @@ define <vscale x 2 x i8> @test_vlsseg8_nxv2i8(ptr %base, i64 %offset, i64 %vl) {
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2632,7 +2485,6 @@ define <vscale x 2 x i8> @test_vlsseg8_mask_nxv2i8(ptr %base, i64 %offset, i64 %
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e8.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vlsseg8.nxv2i8(<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef ,<vscale x 2 x i8> undef, <vscale x 2 x i8> undef, <vscale x 2 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2650,7 +2502,6 @@ define <vscale x 8 x i32> @test_vlsseg2_nxv8i32(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2665,7 +2516,6 @@ define <vscale x 8 x i32> @test_vlsseg2_mask_nxv8i32(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vlsseg2.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i32> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2683,7 +2533,6 @@ define <vscale x 32 x i8> @test_vlsseg2_nxv32i8(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT:    vlsseg2e8.v v4, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2698,7 +2547,6 @@ define <vscale x 32 x i8> @test_vlsseg2_mask_nxv32i8(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vlsseg2e8.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlsseg2e8.v v4, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vlsseg2.nxv32i8(<vscale x 32 x i8> undef, <vscale x 32 x i8> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2716,7 +2564,6 @@ define <vscale x 2 x i16> @test_vlsseg2_nxv2i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2731,7 +2578,6 @@ define <vscale x 2 x i16> @test_vlsseg2_mask_nxv2i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg2.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2749,7 +2595,6 @@ define <vscale x 2 x i16> @test_vlsseg3_nxv2i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2765,7 +2610,6 @@ define <vscale x 2 x i16> @test_vlsseg3_mask_nxv2i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg3.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2783,7 +2627,6 @@ define <vscale x 2 x i16> @test_vlsseg4_nxv2i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2800,7 +2643,6 @@ define <vscale x 2 x i16> @test_vlsseg4_mask_nxv2i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg4.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2818,7 +2660,6 @@ define <vscale x 2 x i16> @test_vlsseg5_nxv2i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2836,7 +2677,6 @@ define <vscale x 2 x i16> @test_vlsseg5_mask_nxv2i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg5.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2854,7 +2694,6 @@ define <vscale x 2 x i16> @test_vlsseg6_nxv2i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2873,7 +2712,6 @@ define <vscale x 2 x i16> @test_vlsseg6_mask_nxv2i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg6.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2891,7 +2729,6 @@ define <vscale x 2 x i16> @test_vlsseg7_nxv2i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2911,7 +2748,6 @@ define <vscale x 2 x i16> @test_vlsseg7_mask_nxv2i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg7.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2929,7 +2765,6 @@ define <vscale x 2 x i16> @test_vlsseg8_nxv2i16(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2950,7 +2785,6 @@ define <vscale x 2 x i16> @test_vlsseg8_mask_nxv2i16(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vlsseg8.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef ,<vscale x 2 x i16> undef, <vscale x 2 x i16> undef, <vscale x 2 x i16> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2968,7 +2802,6 @@ define <vscale x 2 x i64> @test_vlsseg2_nxv2i64(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -2983,7 +2816,6 @@ define <vscale x 2 x i64> @test_vlsseg2_mask_nxv2i64(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg2.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3001,7 +2833,6 @@ define <vscale x 2 x i64> @test_vlsseg3_nxv2i64(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vlsseg3e64.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3017,7 +2848,6 @@ define <vscale x 2 x i64> @test_vlsseg3_mask_nxv2i64(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlsseg3e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg3.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3035,7 +2865,6 @@ define <vscale x 2 x i64> @test_vlsseg4_nxv2i64(ptr %base, i64 %offset, i64 %vl)
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vlsseg4e64.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3052,7 +2881,6 @@ define <vscale x 2 x i64> @test_vlsseg4_mask_nxv2i64(ptr %base, i64 %offset, i64
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlsseg4e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vlsseg4.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, <vscale x 2 x i64> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3070,7 +2898,6 @@ define <vscale x 16 x half> @test_vlsseg2_nxv16f16(ptr %base, i64 %offset, i64 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3085,7 +2912,6 @@ define <vscale x 16 x half> @test_vlsseg2_mask_nxv16f16(ptr %base, i64 %offset,
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vlsseg2.nxv16f16(<vscale x 16 x half> undef, <vscale x 16 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3103,7 +2929,6 @@ define <vscale x 4 x double> @test_vlsseg2_nxv4f64(ptr %base, i64 %offset, i64 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3118,7 +2943,6 @@ define <vscale x 4 x double> @test_vlsseg2_mask_nxv4f64(ptr %base, i64 %offset,
 ; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlsseg2e64.v v4, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vlsseg2.nxv4f64(<vscale x 4 x double> undef, <vscale x 4 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3136,7 +2960,6 @@ define <vscale x 1 x double> @test_vlsseg2_nxv1f64(ptr %base, i64 %offset, i64 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3151,7 +2974,6 @@ define <vscale x 1 x double> @test_vlsseg2_mask_nxv1f64(ptr %base, i64 %offset,
 ; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg2.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3169,7 +2991,6 @@ define <vscale x 1 x double> @test_vlsseg3_nxv1f64(ptr %base, i64 %offset, i64 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3185,7 +3006,6 @@ define <vscale x 1 x double> @test_vlsseg3_mask_nxv1f64(ptr %base, i64 %offset,
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg3.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3203,7 +3023,6 @@ define <vscale x 1 x double> @test_vlsseg4_nxv1f64(ptr %base, i64 %offset, i64 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3220,7 +3039,6 @@ define <vscale x 1 x double> @test_vlsseg4_mask_nxv1f64(ptr %base, i64 %offset,
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg4.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3238,7 +3056,6 @@ define <vscale x 1 x double> @test_vlsseg5_nxv1f64(ptr %base, i64 %offset, i64 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3256,7 +3073,6 @@ define <vscale x 1 x double> @test_vlsseg5_mask_nxv1f64(ptr %base, i64 %offset,
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg5.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3274,7 +3090,6 @@ define <vscale x 1 x double> @test_vlsseg6_nxv1f64(ptr %base, i64 %offset, i64 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3293,7 +3108,6 @@ define <vscale x 1 x double> @test_vlsseg6_mask_nxv1f64(ptr %base, i64 %offset,
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg6.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3311,7 +3125,6 @@ define <vscale x 1 x double> @test_vlsseg7_nxv1f64(ptr %base, i64 %offset, i64 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3331,7 +3144,6 @@ define <vscale x 1 x double> @test_vlsseg7_mask_nxv1f64(ptr %base, i64 %offset,
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg7.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3349,7 +3161,6 @@ define <vscale x 1 x double> @test_vlsseg8_nxv1f64(ptr %base, i64 %offset, i64 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3370,7 +3181,6 @@ define <vscale x 1 x double> @test_vlsseg8_mask_nxv1f64(ptr %base, i64 %offset,
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e64.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vlsseg8.nxv1f64(<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef ,<vscale x 1 x double> undef, <vscale x 1 x double> undef, <vscale x 1 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3388,7 +3198,6 @@ define <vscale x 2 x float> @test_vlsseg2_nxv2f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3403,7 +3212,6 @@ define <vscale x 2 x float> @test_vlsseg2_mask_nxv2f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg2.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3421,7 +3229,6 @@ define <vscale x 2 x float> @test_vlsseg3_nxv2f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3437,7 +3244,6 @@ define <vscale x 2 x float> @test_vlsseg3_mask_nxv2f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg3.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3455,7 +3261,6 @@ define <vscale x 2 x float> @test_vlsseg4_nxv2f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3472,7 +3277,6 @@ define <vscale x 2 x float> @test_vlsseg4_mask_nxv2f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg4.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3490,7 +3294,6 @@ define <vscale x 2 x float> @test_vlsseg5_nxv2f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3508,7 +3311,6 @@ define <vscale x 2 x float> @test_vlsseg5_mask_nxv2f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg5.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3526,7 +3328,6 @@ define <vscale x 2 x float> @test_vlsseg6_nxv2f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3545,7 +3346,6 @@ define <vscale x 2 x float> @test_vlsseg6_mask_nxv2f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg6.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3563,7 +3363,6 @@ define <vscale x 2 x float> @test_vlsseg7_nxv2f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3583,7 +3382,6 @@ define <vscale x 2 x float> @test_vlsseg7_mask_nxv2f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg7.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3601,7 +3399,6 @@ define <vscale x 2 x float> @test_vlsseg8_nxv2f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3622,7 +3419,6 @@ define <vscale x 2 x float> @test_vlsseg8_mask_nxv2f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vlsseg8.nxv2f32(<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef ,<vscale x 2 x float> undef, <vscale x 2 x float> undef, <vscale x 2 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3640,7 +3436,6 @@ define <vscale x 1 x half> @test_vlsseg2_nxv1f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3655,7 +3450,6 @@ define <vscale x 1 x half> @test_vlsseg2_mask_nxv1f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg2.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3673,7 +3467,6 @@ define <vscale x 1 x half> @test_vlsseg3_nxv1f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3689,7 +3482,6 @@ define <vscale x 1 x half> @test_vlsseg3_mask_nxv1f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg3.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3707,7 +3499,6 @@ define <vscale x 1 x half> @test_vlsseg4_nxv1f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3724,7 +3515,6 @@ define <vscale x 1 x half> @test_vlsseg4_mask_nxv1f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg4.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3742,7 +3532,6 @@ define <vscale x 1 x half> @test_vlsseg5_nxv1f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3760,7 +3549,6 @@ define <vscale x 1 x half> @test_vlsseg5_mask_nxv1f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg5.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3778,7 +3566,6 @@ define <vscale x 1 x half> @test_vlsseg6_nxv1f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3797,7 +3584,6 @@ define <vscale x 1 x half> @test_vlsseg6_mask_nxv1f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg6.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3815,7 +3601,6 @@ define <vscale x 1 x half> @test_vlsseg7_nxv1f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3835,7 +3620,6 @@ define <vscale x 1 x half> @test_vlsseg7_mask_nxv1f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg7.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3853,7 +3637,6 @@ define <vscale x 1 x half> @test_vlsseg8_nxv1f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3874,7 +3657,6 @@ define <vscale x 1 x half> @test_vlsseg8_mask_nxv1f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vlsseg8.nxv1f16(<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef ,<vscale x 1 x half> undef, <vscale x 1 x half> undef, <vscale x 1 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3892,7 +3674,6 @@ define <vscale x 1 x float> @test_vlsseg2_nxv1f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3907,7 +3688,6 @@ define <vscale x 1 x float> @test_vlsseg2_mask_nxv1f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg2.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3925,7 +3705,6 @@ define <vscale x 1 x float> @test_vlsseg3_nxv1f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3941,7 +3720,6 @@ define <vscale x 1 x float> @test_vlsseg3_mask_nxv1f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg3.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3959,7 +3737,6 @@ define <vscale x 1 x float> @test_vlsseg4_nxv1f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3976,7 +3753,6 @@ define <vscale x 1 x float> @test_vlsseg4_mask_nxv1f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg4.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -3994,7 +3770,6 @@ define <vscale x 1 x float> @test_vlsseg5_nxv1f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4012,7 +3787,6 @@ define <vscale x 1 x float> @test_vlsseg5_mask_nxv1f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg5.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4030,7 +3804,6 @@ define <vscale x 1 x float> @test_vlsseg6_nxv1f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4049,7 +3822,6 @@ define <vscale x 1 x float> @test_vlsseg6_mask_nxv1f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg6.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4067,7 +3839,6 @@ define <vscale x 1 x float> @test_vlsseg7_nxv1f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4087,7 +3858,6 @@ define <vscale x 1 x float> @test_vlsseg7_mask_nxv1f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg7.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4105,7 +3875,6 @@ define <vscale x 1 x float> @test_vlsseg8_nxv1f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4126,7 +3895,6 @@ define <vscale x 1 x float> @test_vlsseg8_mask_nxv1f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e32.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vlsseg8.nxv1f32(<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef ,<vscale x 1 x float> undef, <vscale x 1 x float> undef, <vscale x 1 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4144,7 +3912,6 @@ define <vscale x 8 x half> @test_vlsseg2_nxv8f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4159,7 +3926,6 @@ define <vscale x 8 x half> @test_vlsseg2_mask_nxv8f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlsseg2e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg2.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4177,7 +3943,6 @@ define <vscale x 8 x half> @test_vlsseg3_nxv8f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4193,7 +3958,6 @@ define <vscale x 8 x half> @test_vlsseg3_mask_nxv8f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlsseg3e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg3.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4211,7 +3975,6 @@ define <vscale x 8 x half> @test_vlsseg4_nxv8f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4228,7 +3991,6 @@ define <vscale x 8 x half> @test_vlsseg4_mask_nxv8f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlsseg4e16.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vlsseg4.nxv8f16(<vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, <vscale x 8 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4246,7 +4008,6 @@ define <vscale x 8 x float> @test_vlsseg2_nxv8f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4261,7 +4022,6 @@ define <vscale x 8 x float> @test_vlsseg2_mask_nxv8f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlsseg2e32.v v4, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vlsseg2.nxv8f32(<vscale x 8 x float> undef, <vscale x 8 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4279,7 +4039,6 @@ define <vscale x 2 x double> @test_vlsseg2_nxv2f64(ptr %base, i64 %offset, i64 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4294,7 +4053,6 @@ define <vscale x 2 x double> @test_vlsseg2_mask_nxv2f64(ptr %base, i64 %offset,
 ; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlsseg2e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg2.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4312,7 +4070,6 @@ define <vscale x 2 x double> @test_vlsseg3_nxv2f64(ptr %base, i64 %offset, i64 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vlsseg3e64.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4328,7 +4085,6 @@ define <vscale x 2 x double> @test_vlsseg3_mask_nxv2f64(ptr %base, i64 %offset,
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlsseg3e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg3.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4346,7 +4102,6 @@ define <vscale x 2 x double> @test_vlsseg4_nxv2f64(ptr %base, i64 %offset, i64 %
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vlsseg4e64.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4363,7 +4118,6 @@ define <vscale x 2 x double> @test_vlsseg4_mask_nxv2f64(ptr %base, i64 %offset,
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlsseg4e64.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vlsseg4.nxv2f64(<vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, <vscale x 2 x double> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4381,7 +4135,6 @@ define <vscale x 4 x half> @test_vlsseg2_nxv4f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4396,7 +4149,6 @@ define <vscale x 4 x half> @test_vlsseg2_mask_nxv4f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg2.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4414,7 +4166,6 @@ define <vscale x 4 x half> @test_vlsseg3_nxv4f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4430,7 +4181,6 @@ define <vscale x 4 x half> @test_vlsseg3_mask_nxv4f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg3.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4448,7 +4198,6 @@ define <vscale x 4 x half> @test_vlsseg4_nxv4f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4465,7 +4214,6 @@ define <vscale x 4 x half> @test_vlsseg4_mask_nxv4f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg4.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4483,7 +4231,6 @@ define <vscale x 4 x half> @test_vlsseg5_nxv4f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4501,7 +4248,6 @@ define <vscale x 4 x half> @test_vlsseg5_mask_nxv4f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg5.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4519,7 +4265,6 @@ define <vscale x 4 x half> @test_vlsseg6_nxv4f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4538,7 +4283,6 @@ define <vscale x 4 x half> @test_vlsseg6_mask_nxv4f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg6.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4556,7 +4300,6 @@ define <vscale x 4 x half> @test_vlsseg7_nxv4f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4576,7 +4319,6 @@ define <vscale x 4 x half> @test_vlsseg7_mask_nxv4f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg7.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4594,7 +4336,6 @@ define <vscale x 4 x half> @test_vlsseg8_nxv4f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4615,7 +4356,6 @@ define <vscale x 4 x half> @test_vlsseg8_mask_nxv4f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vlsseg8.nxv4f16(<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef ,<vscale x 4 x half> undef, <vscale x 4 x half> undef, <vscale x 4 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4633,7 +4373,6 @@ define <vscale x 2 x half> @test_vlsseg2_nxv2f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4648,7 +4387,6 @@ define <vscale x 2 x half> @test_vlsseg2_mask_nxv2f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vlsseg2e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg2.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4666,7 +4404,6 @@ define <vscale x 2 x half> @test_vlsseg3_nxv2f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4682,7 +4419,6 @@ define <vscale x 2 x half> @test_vlsseg3_mask_nxv2f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v8, v7
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vlsseg3e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg3.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4700,7 +4436,6 @@ define <vscale x 2 x half> @test_vlsseg4_nxv2f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4717,7 +4452,6 @@ define <vscale x 2 x half> @test_vlsseg4_mask_nxv2f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v9, v7
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vlsseg4e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg4.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4735,7 +4469,6 @@ define <vscale x 2 x half> @test_vlsseg5_nxv2f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4753,7 +4486,6 @@ define <vscale x 2 x half> @test_vlsseg5_mask_nxv2f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v10, v7
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vlsseg5e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg5.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4771,7 +4503,6 @@ define <vscale x 2 x half> @test_vlsseg6_nxv2f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4790,7 +4521,6 @@ define <vscale x 2 x half> @test_vlsseg6_mask_nxv2f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v11, v7
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vlsseg6e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg6.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4808,7 +4538,6 @@ define <vscale x 2 x half> @test_vlsseg7_nxv2f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4828,7 +4557,6 @@ define <vscale x 2 x half> @test_vlsseg7_mask_nxv2f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v12, v7
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vlsseg7e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg7.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4846,7 +4574,6 @@ define <vscale x 2 x half> @test_vlsseg8_nxv2f16(ptr %base, i64 %offset, i64 %vl
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4867,7 +4594,6 @@ define <vscale x 2 x half> @test_vlsseg8_mask_nxv2f16(ptr %base, i64 %offset, i6
 ; CHECK-NEXT:    vmv1r.v v13, v7
 ; CHECK-NEXT:    vmv1r.v v14, v7
 ; CHECK-NEXT:    vlsseg8e16.v v7, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vlsseg8.nxv2f16(<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef ,<vscale x 2 x half> undef, <vscale x 2 x half> undef, <vscale x 2 x half> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4885,7 +4611,6 @@ define <vscale x 4 x float> @test_vlsseg2_nxv4f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4900,7 +4625,6 @@ define <vscale x 4 x float> @test_vlsseg2_mask_nxv4f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vlsseg2e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg2.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4918,7 +4642,6 @@ define <vscale x 4 x float> @test_vlsseg3_nxv4f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4934,7 +4657,6 @@ define <vscale x 4 x float> @test_vlsseg3_mask_nxv4f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vmv2r.v v8, v6
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vlsseg3e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg3.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4952,7 +4674,6 @@ define <vscale x 4 x float> @test_vlsseg4_nxv4f32(ptr %base, i64 %offset, i64 %v
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)
@@ -4969,7 +4690,6 @@ define <vscale x 4 x float> @test_vlsseg4_mask_nxv4f32(ptr %base, i64 %offset, i
 ; CHECK-NEXT:    vmv2r.v v10, v6
 ; CHECK-NEXT:    vmv2r.v v12, v6
 ; CHECK-NEXT:    vlsseg4e32.v v6, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vlsseg4.nxv4f32(<vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, <vscale x 4 x float> undef, ptr %base, i64 %offset, i64 %vl)

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll
index 5fdf0facb6f76..5d094264d7d45 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv32.ll
@@ -24,7 +24,6 @@ define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -54,7 +53,6 @@ define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -84,7 +82,6 @@ define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -114,7 +111,6 @@ define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -144,7 +140,6 @@ define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -174,7 +169,6 @@ define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -201,12 +195,11 @@ entry:
 define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -233,12 +226,11 @@ entry:
 define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -265,12 +257,11 @@ entry:
 define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -298,9 +289,9 @@ define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -331,9 +322,9 @@ define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -364,9 +355,9 @@ define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -397,10 +388,10 @@ define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -431,10 +422,10 @@ define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -465,10 +456,10 @@ define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -499,11 +490,11 @@ define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -534,11 +525,11 @@ define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -569,11 +560,11 @@ define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -604,12 +595,12 @@ define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -640,12 +631,12 @@ define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -676,12 +667,12 @@ define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -712,13 +703,13 @@ define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -749,13 +740,13 @@ define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -786,13 +777,13 @@ define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -825,7 +816,6 @@ define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -855,7 +845,6 @@ define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -885,7 +874,6 @@ define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -913,10 +901,9 @@ define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8
 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -943,12 +930,11 @@ entry:
 define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -976,10 +962,9 @@ define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8
 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -1006,13 +991,12 @@ entry:
 define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vluxseg4ei16.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -1040,9 +1024,9 @@ define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -1073,11 +1057,10 @@ define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8
 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -1107,7 +1090,6 @@ define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -1137,7 +1119,6 @@ define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -1167,7 +1148,6 @@ define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -1194,12 +1174,11 @@ entry:
 define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -1226,12 +1205,11 @@ entry:
 define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -1258,12 +1236,11 @@ entry:
 define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -1291,9 +1268,9 @@ define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1324,9 +1301,9 @@ define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1357,9 +1334,9 @@ define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1390,10 +1367,10 @@ define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1424,10 +1401,10 @@ define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1458,10 +1435,10 @@ define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1492,11 +1469,11 @@ define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1527,11 +1504,11 @@ define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1562,11 +1539,11 @@ define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1597,12 +1574,12 @@ define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1633,12 +1610,12 @@ define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1669,12 +1646,12 @@ define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1705,13 +1682,13 @@ define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1742,13 +1719,13 @@ define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1779,13 +1756,13 @@ define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1818,7 +1795,6 @@ define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1848,7 +1824,6 @@ define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1878,7 +1853,6 @@ define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1905,12 +1879,11 @@ entry:
 define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1937,12 +1910,11 @@ entry:
 define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -1970,10 +1942,9 @@ define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -2001,9 +1972,9 @@ define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2034,9 +2005,9 @@ define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2066,13 +2037,12 @@ entry:
 define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vluxseg4ei32.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -2100,10 +2070,10 @@ define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2134,10 +2104,10 @@ define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2168,10 +2138,10 @@ define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -2202,11 +2172,11 @@ define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2237,11 +2207,11 @@ define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2272,11 +2242,11 @@ define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -2307,12 +2277,12 @@ define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2343,12 +2313,12 @@ define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2379,12 +2349,12 @@ define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -2415,13 +2385,13 @@ define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2452,13 +2422,13 @@ define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2489,13 +2459,13 @@ define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -2528,7 +2498,6 @@ define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2558,7 +2527,6 @@ define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2588,7 +2556,6 @@ define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2615,12 +2582,11 @@ entry:
 define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2647,12 +2613,11 @@ entry:
 define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2679,12 +2644,11 @@ entry:
 define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -2712,9 +2676,9 @@ define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2745,9 +2709,9 @@ define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2778,9 +2742,9 @@ define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2811,10 +2775,10 @@ define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2845,10 +2809,10 @@ define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2879,10 +2843,10 @@ define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2913,11 +2877,11 @@ define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2948,11 +2912,11 @@ define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2983,11 +2947,11 @@ define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3018,12 +2982,12 @@ define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3054,12 +3018,12 @@ define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3090,12 +3054,12 @@ define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3126,13 +3090,13 @@ define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3163,13 +3127,13 @@ define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3200,13 +3164,13 @@ define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3239,7 +3203,6 @@ define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3269,7 +3232,6 @@ define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3299,7 +3261,6 @@ define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3326,12 +3287,11 @@ entry:
 define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3358,12 +3318,11 @@ entry:
 define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3391,10 +3350,9 @@ define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3422,9 +3380,9 @@ define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -3455,9 +3413,9 @@ define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i8(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -3487,13 +3445,12 @@ entry:
 define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vluxseg4ei32.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3523,7 +3480,6 @@ define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3553,7 +3509,6 @@ define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3583,7 +3538,6 @@ define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3611,10 +3565,9 @@ define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3641,12 +3594,11 @@ entry:
 define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3674,10 +3626,9 @@ define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3704,13 +3655,12 @@ entry:
 define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vluxseg4ei16.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3738,9 +3688,9 @@ define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3771,11 +3721,10 @@ define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3803,10 +3752,10 @@ define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -3837,10 +3786,10 @@ define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3871,12 +3820,11 @@ define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -3904,11 +3852,11 @@ define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -3939,11 +3887,11 @@ define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3973,15 +3921,14 @@ entry:
 define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
+; CHECK-NEXT:    vluxseg6ei32.v v7, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -4009,12 +3956,12 @@ define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -4045,12 +3992,12 @@ define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4081,12 +4028,12 @@ define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -4117,13 +4064,13 @@ define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -4154,13 +4101,13 @@ define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4191,13 +4138,13 @@ define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -4230,7 +4177,6 @@ define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -4260,7 +4206,6 @@ define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -4290,7 +4235,6 @@ define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -4320,7 +4264,6 @@ define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4350,7 +4293,6 @@ define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4380,7 +4322,6 @@ define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4407,12 +4348,11 @@ entry:
 define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4439,12 +4379,11 @@ entry:
 define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4472,10 +4411,9 @@ define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4503,9 +4441,9 @@ define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4536,9 +4474,9 @@ define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4568,13 +4506,12 @@ entry:
 define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vluxseg4ei32.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -4602,10 +4539,10 @@ define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4636,10 +4573,10 @@ define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4670,10 +4607,10 @@ define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -4704,11 +4641,11 @@ define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4739,11 +4676,11 @@ define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4774,11 +4711,11 @@ define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -4809,12 +4746,12 @@ define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4845,12 +4782,12 @@ define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4881,12 +4818,12 @@ define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -4917,13 +4854,13 @@ define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4954,13 +4891,13 @@ define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4991,13 +4928,13 @@ define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -5030,7 +4967,6 @@ define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -5060,7 +4996,6 @@ define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -5090,7 +5025,6 @@ define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -5117,12 +5051,11 @@ entry:
 define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -5149,12 +5082,11 @@ entry:
 define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -5181,12 +5113,11 @@ entry:
 define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -5214,9 +5145,9 @@ define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5247,9 +5178,9 @@ define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5280,9 +5211,9 @@ define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5313,10 +5244,10 @@ define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5347,10 +5278,10 @@ define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5381,10 +5312,10 @@ define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5415,11 +5346,11 @@ define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5450,11 +5381,11 @@ define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5485,11 +5416,11 @@ define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5520,12 +5451,12 @@ define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5556,12 +5487,12 @@ define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5592,12 +5523,12 @@ define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5628,13 +5559,13 @@ define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5665,13 +5596,13 @@ define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5702,13 +5633,13 @@ define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5741,7 +5672,6 @@ define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
@@ -5771,7 +5701,6 @@ define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl, i32 1)
@@ -5801,7 +5730,6 @@ define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5831,7 +5759,6 @@ define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5861,7 +5788,6 @@ define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5888,12 +5814,11 @@ entry:
 define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5920,12 +5845,11 @@ entry:
 define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5952,12 +5876,11 @@ entry:
 define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -5985,9 +5908,9 @@ define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6018,9 +5941,9 @@ define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6051,9 +5974,9 @@ define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6084,10 +6007,10 @@ define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6118,10 +6041,10 @@ define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6152,10 +6075,10 @@ define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6186,11 +6109,11 @@ define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6221,11 +6144,11 @@ define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6256,11 +6179,11 @@ define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6291,12 +6214,12 @@ define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6327,12 +6250,12 @@ define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6363,12 +6286,12 @@ define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6399,13 +6322,13 @@ define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6436,13 +6359,13 @@ define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6473,13 +6396,13 @@ define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6512,7 +6435,6 @@ define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -6542,7 +6464,6 @@ define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -6572,7 +6493,6 @@ define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -6599,12 +6519,11 @@ entry:
 define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -6631,12 +6550,11 @@ entry:
 define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -6663,12 +6581,11 @@ entry:
 define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -6696,9 +6613,9 @@ define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6729,9 +6646,9 @@ define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6762,9 +6679,9 @@ define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6795,10 +6712,10 @@ define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6829,10 +6746,10 @@ define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6863,10 +6780,10 @@ define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6897,11 +6814,11 @@ define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6932,11 +6849,11 @@ define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6967,11 +6884,11 @@ define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7002,12 +6919,12 @@ define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7038,12 +6955,12 @@ define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7074,12 +6991,12 @@ define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7110,13 +7027,13 @@ define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7147,13 +7064,13 @@ define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7184,13 +7101,13 @@ define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7223,7 +7140,6 @@ define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7253,7 +7169,6 @@ define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7283,7 +7198,6 @@ define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7310,12 +7224,11 @@ entry:
 define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7342,12 +7255,11 @@ entry:
 define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7374,12 +7286,11 @@ entry:
 define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7407,9 +7318,9 @@ define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i16(<vscale x 4 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -7440,9 +7351,9 @@ define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -7473,9 +7384,9 @@ define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -7508,7 +7419,6 @@ define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -7538,7 +7448,6 @@ define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -7568,7 +7477,6 @@ define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl, i32 1)
@@ -7598,7 +7506,6 @@ define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x d
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7628,7 +7535,6 @@ define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x do
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7658,7 +7564,6 @@ define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x d
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -7688,7 +7593,6 @@ define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -7718,7 +7622,6 @@ define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -7748,7 +7651,6 @@ define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -7775,12 +7677,11 @@ entry:
 define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -7807,12 +7708,11 @@ entry:
 define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -7839,12 +7739,11 @@ entry:
 define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -7872,9 +7771,9 @@ define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7905,9 +7804,9 @@ define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7938,9 +7837,9 @@ define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7971,10 +7870,10 @@ define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8005,10 +7904,10 @@ define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8039,10 +7938,10 @@ define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8073,11 +7972,11 @@ define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8108,11 +8007,11 @@ define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8143,11 +8042,11 @@ define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8178,12 +8077,12 @@ define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8214,12 +8113,12 @@ define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8250,12 +8149,12 @@ define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8286,13 +8185,13 @@ define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8323,13 +8222,13 @@ define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8360,13 +8259,13 @@ define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8399,7 +8298,6 @@ define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -8429,7 +8327,6 @@ define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -8459,7 +8356,6 @@ define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -8486,12 +8382,11 @@ entry:
 define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -8518,12 +8413,11 @@ entry:
 define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -8550,12 +8444,11 @@ entry:
 define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -8583,9 +8476,9 @@ define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8616,9 +8509,9 @@ define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8649,9 +8542,9 @@ define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8682,10 +8575,10 @@ define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8716,10 +8609,10 @@ define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8750,10 +8643,10 @@ define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8784,11 +8677,11 @@ define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8819,11 +8712,11 @@ define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8854,11 +8747,11 @@ define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8889,12 +8782,12 @@ define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8925,12 +8818,12 @@ define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8961,12 +8854,12 @@ define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8997,13 +8890,13 @@ define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9034,13 +8927,13 @@ define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9071,13 +8964,13 @@ define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9110,7 +9003,6 @@ define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9140,7 +9032,6 @@ define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9170,7 +9061,6 @@ define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9197,12 +9087,11 @@ entry:
 define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9229,12 +9118,11 @@ entry:
 define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9261,12 +9149,11 @@ entry:
 define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9294,9 +9181,9 @@ define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9327,9 +9214,9 @@ define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9360,9 +9247,9 @@ define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9393,10 +9280,10 @@ define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9427,10 +9314,10 @@ define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9461,10 +9348,10 @@ define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9495,11 +9382,11 @@ define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9530,11 +9417,11 @@ define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9565,11 +9452,11 @@ define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9600,12 +9487,12 @@ define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9636,12 +9523,12 @@ define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9672,12 +9559,12 @@ define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9708,13 +9595,13 @@ define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9745,13 +9632,13 @@ define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9782,13 +9669,13 @@ define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9821,7 +9708,6 @@ define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9851,7 +9737,6 @@ define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9881,7 +9766,6 @@ define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9908,12 +9792,11 @@ entry:
 define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9940,12 +9823,11 @@ entry:
 define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -9972,12 +9854,11 @@ entry:
 define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl, i32 1)
@@ -10005,9 +9886,9 @@ define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10038,9 +9919,9 @@ define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10071,9 +9952,9 @@ define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10104,10 +9985,10 @@ define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10138,10 +10019,10 @@ define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10172,10 +10053,10 @@ define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10206,11 +10087,11 @@ define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10241,11 +10122,11 @@ define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10276,11 +10157,11 @@ define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10311,12 +10192,12 @@ define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10347,12 +10228,12 @@ define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10383,12 +10264,12 @@ define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10419,13 +10300,13 @@ define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10456,13 +10337,13 @@ define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10493,13 +10374,13 @@ define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10532,7 +10413,6 @@ define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x hal
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10562,7 +10442,6 @@ define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10592,7 +10471,6 @@ define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x hal
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10619,12 +10497,11 @@ entry:
 define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10651,12 +10528,11 @@ entry:
 define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10684,10 +10560,9 @@ define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x hal
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10715,9 +10590,9 @@ define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i16(<vscale x 8 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -10748,9 +10623,9 @@ define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i8(<vscale x 8 x half
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -10780,13 +10655,12 @@ entry:
 define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vluxseg4ei32.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10816,7 +10690,6 @@ define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x fl
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10846,7 +10719,6 @@ define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x flo
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10876,7 +10748,6 @@ define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x fl
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl, i32 1)
@@ -10906,7 +10777,6 @@ define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x d
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -10936,7 +10806,6 @@ define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x do
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -10966,7 +10835,6 @@ define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x d
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -10993,12 +10861,11 @@ entry:
 define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -11025,12 +10892,11 @@ entry:
 define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -11057,12 +10923,11 @@ entry:
 define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -11090,9 +10955,9 @@ define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i32(<vscale x 2 x d
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -11123,9 +10988,9 @@ define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i8(<vscale x 2 x do
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -11156,9 +11021,9 @@ define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i16(<vscale x 2 x d
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -11191,7 +11056,6 @@ define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -11221,7 +11085,6 @@ define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -11251,7 +11114,6 @@ define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -11278,12 +11140,11 @@ entry:
 define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -11310,12 +11171,11 @@ entry:
 define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -11343,10 +11203,9 @@ define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -11374,9 +11233,9 @@ define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11407,9 +11266,9 @@ define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11439,13 +11298,12 @@ entry:
 define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vluxseg4ei32.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -11473,10 +11331,10 @@ define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11507,10 +11365,10 @@ define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11541,10 +11399,10 @@ define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -11575,11 +11433,11 @@ define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11610,11 +11468,11 @@ define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11645,11 +11503,11 @@ define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -11680,12 +11538,12 @@ define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11716,12 +11574,12 @@ define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11752,12 +11610,12 @@ define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -11788,13 +11646,13 @@ define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11825,13 +11683,13 @@ define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11862,13 +11720,13 @@ define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -11901,7 +11759,6 @@ define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -11931,7 +11788,6 @@ define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -11961,7 +11817,6 @@ define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -11988,12 +11843,11 @@ entry:
 define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -12020,12 +11874,11 @@ entry:
 define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -12052,12 +11905,11 @@ entry:
 define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl, i32 1)
@@ -12085,9 +11937,9 @@ define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12118,9 +11970,9 @@ define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12151,9 +12003,9 @@ define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12184,10 +12036,10 @@ define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12218,10 +12070,10 @@ define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12252,10 +12104,10 @@ define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12286,11 +12138,11 @@ define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12321,11 +12173,11 @@ define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12356,11 +12208,11 @@ define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12391,12 +12243,12 @@ define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12427,12 +12279,12 @@ define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12463,12 +12315,12 @@ define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12499,13 +12351,13 @@ define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12536,13 +12388,13 @@ define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12573,13 +12425,13 @@ define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12612,7 +12464,6 @@ define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x fl
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -12642,7 +12493,6 @@ define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x flo
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -12672,7 +12522,6 @@ define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x fl
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -12699,12 +12548,11 @@ entry:
 define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -12731,12 +12579,11 @@ entry:
 define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -12763,12 +12610,11 @@ entry:
 define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl, i32 1)
@@ -12796,9 +12642,9 @@ define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i16(<vscale x 4 x fl
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -12829,9 +12675,9 @@ define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i8(<vscale x 4 x flo
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -12862,9 +12708,9 @@ define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i32(<vscale x 4 x fl
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14

diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll
index 0072cfef8ae92..c8a419b672bff 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxseg-rv64.ll
@@ -24,7 +24,6 @@ define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -54,7 +53,6 @@ define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -84,7 +82,6 @@ define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vluxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -114,7 +111,6 @@ define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -144,7 +140,6 @@ define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -174,7 +169,6 @@ define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i64(<vscale x 4 x i32>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -204,7 +198,6 @@ define <vscale x 4 x i32> @test_vluxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -231,12 +224,11 @@ entry:
 define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i32(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -263,12 +255,11 @@ entry:
 define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i8(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -296,10 +287,9 @@ define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i64(<vscale x 4 x i32>
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -326,12 +316,11 @@ entry:
 define <vscale x 4 x i32> @test_vluxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg3.mask.nxv4i32.nxv4i16(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -359,9 +348,9 @@ define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i32(<vscale x 4 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -392,9 +381,9 @@ define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i8(<vscale x 4 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -424,13 +413,12 @@ entry:
 define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg4ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vluxseg4ei64.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>,<vscale x 4 x i32>} @llvm.riscv.vluxseg4.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -458,9 +446,9 @@ define <vscale x 4 x i32> @test_vluxseg4_mask_nxv4i32_nxv4i16(<vscale x 4 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -493,7 +481,6 @@ define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -523,7 +510,6 @@ define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -553,7 +539,6 @@ define <vscale x 16 x i8> @test_vluxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -581,10 +566,9 @@ define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8
 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -611,12 +595,11 @@ entry:
 define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i8(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -644,10 +627,9 @@ define <vscale x 16 x i8> @test_vluxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8
 ; CHECK-LABEL: test_vluxseg3_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -674,13 +656,12 @@ entry:
 define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
-; CHECK-NEXT:    vluxseg4ei16.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vluxseg4ei16.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -708,9 +689,9 @@ define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i8(<vscale x 16 x i8>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -741,11 +722,10 @@ define <vscale x 16 x i8> @test_vluxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8
 ; CHECK-LABEL: test_vluxseg4_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>,<vscale x 16 x i8>} @llvm.riscv.vluxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -775,7 +755,6 @@ define <vscale x 1 x i64> @test_vluxseg2_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -805,7 +784,6 @@ define <vscale x 1 x i64> @test_vluxseg2_mask_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -835,7 +813,6 @@ define <vscale x 1 x i64> @test_vluxseg2_mask_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -865,7 +842,6 @@ define <vscale x 1 x i64> @test_vluxseg2_mask_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -892,12 +868,11 @@ entry:
 define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i64(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -924,12 +899,11 @@ entry:
 define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i32(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -956,12 +930,11 @@ entry:
 define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i16(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -988,12 +961,11 @@ entry:
 define <vscale x 1 x i64> @test_vluxseg3_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i64>,<vscale x 1 x i64>,<vscale x 1 x i64>} @llvm.riscv.vluxseg3.mask.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1021,9 +993,9 @@ define <vscale x 1 x i64> @test_vluxseg4_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1054,9 +1026,9 @@ define <vscale x 1 x i64> @test_vluxseg4_mask_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1087,9 +1059,9 @@ define <vscale x 1 x i64> @test_vluxseg4_mask_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1120,9 +1092,9 @@ define <vscale x 1 x i64> @test_vluxseg4_mask_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1153,10 +1125,10 @@ define <vscale x 1 x i64> @test_vluxseg5_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1187,10 +1159,10 @@ define <vscale x 1 x i64> @test_vluxseg5_mask_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1221,10 +1193,10 @@ define <vscale x 1 x i64> @test_vluxseg5_mask_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1255,10 +1227,10 @@ define <vscale x 1 x i64> @test_vluxseg5_mask_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1289,11 +1261,11 @@ define <vscale x 1 x i64> @test_vluxseg6_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1324,11 +1296,11 @@ define <vscale x 1 x i64> @test_vluxseg6_mask_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1359,11 +1331,11 @@ define <vscale x 1 x i64> @test_vluxseg6_mask_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1394,11 +1366,11 @@ define <vscale x 1 x i64> @test_vluxseg6_mask_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1429,12 +1401,12 @@ define <vscale x 1 x i64> @test_vluxseg7_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1465,12 +1437,12 @@ define <vscale x 1 x i64> @test_vluxseg7_mask_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1501,12 +1473,12 @@ define <vscale x 1 x i64> @test_vluxseg7_mask_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1537,12 +1509,12 @@ define <vscale x 1 x i64> @test_vluxseg7_mask_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1573,13 +1545,13 @@ define <vscale x 1 x i64> @test_vluxseg8_mask_nxv1i64_nxv1i64(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1610,13 +1582,13 @@ define <vscale x 1 x i64> @test_vluxseg8_mask_nxv1i64_nxv1i32(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1647,13 +1619,13 @@ define <vscale x 1 x i64> @test_vluxseg8_mask_nxv1i64_nxv1i16(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1684,13 +1656,13 @@ define <vscale x 1 x i64> @test_vluxseg8_mask_nxv1i64_nxv1i8(<vscale x 1 x i64>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -1723,7 +1695,6 @@ define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1753,7 +1724,6 @@ define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1783,7 +1753,6 @@ define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1813,7 +1782,6 @@ define <vscale x 1 x i32> @test_vluxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1840,12 +1808,11 @@ entry:
 define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i64(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1872,12 +1839,11 @@ entry:
 define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i32(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1904,12 +1870,11 @@ entry:
 define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1936,12 +1901,11 @@ entry:
 define <vscale x 1 x i32> @test_vluxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i32>,<vscale x 1 x i32>,<vscale x 1 x i32>} @llvm.riscv.vluxseg3.mask.nxv1i32.nxv1i8(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -1969,9 +1933,9 @@ define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2002,9 +1966,9 @@ define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2035,9 +1999,9 @@ define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2068,9 +2032,9 @@ define <vscale x 1 x i32> @test_vluxseg4_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2101,10 +2065,10 @@ define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2135,10 +2099,10 @@ define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2169,10 +2133,10 @@ define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2203,10 +2167,10 @@ define <vscale x 1 x i32> @test_vluxseg5_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2237,11 +2201,11 @@ define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2272,11 +2236,11 @@ define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2307,11 +2271,11 @@ define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2342,11 +2306,11 @@ define <vscale x 1 x i32> @test_vluxseg6_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2377,12 +2341,12 @@ define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2413,12 +2377,12 @@ define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2449,12 +2413,12 @@ define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2485,12 +2449,12 @@ define <vscale x 1 x i32> @test_vluxseg7_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2521,13 +2485,13 @@ define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i64(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2558,13 +2522,13 @@ define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i32(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2595,13 +2559,13 @@ define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2632,13 +2596,13 @@ define <vscale x 1 x i32> @test_vluxseg8_mask_nxv1i32_nxv1i8(<vscale x 1 x i32>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -2671,7 +2635,6 @@ define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2701,7 +2664,6 @@ define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2731,7 +2693,6 @@ define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i64(<vscale x 8 x i16>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2761,7 +2722,6 @@ define <vscale x 8 x i16> @test_vluxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2788,12 +2748,11 @@ entry:
 define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i16(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2820,12 +2779,11 @@ entry:
 define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i8(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2853,10 +2811,9 @@ define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i64(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2884,10 +2841,9 @@ define <vscale x 8 x i16> @test_vluxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -2915,9 +2871,9 @@ define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i16(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -2948,9 +2904,9 @@ define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i8(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -2981,11 +2937,10 @@ define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i64(<vscale x 8 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i64(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -3012,13 +2967,12 @@ entry:
 define <vscale x 8 x i16> @test_vluxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vluxseg4ei32.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>,<vscale x 8 x i16>} @llvm.riscv.vluxseg4.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -3048,7 +3002,6 @@ define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3078,7 +3031,6 @@ define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3108,7 +3060,6 @@ define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3138,7 +3089,6 @@ define <vscale x 4 x i8> @test_vluxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3166,10 +3116,9 @@ define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3196,12 +3145,11 @@ entry:
 define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i8(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3229,10 +3177,9 @@ define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3259,12 +3206,11 @@ entry:
 define <vscale x 4 x i8> @test_vluxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg3.mask.nxv4i8.nxv4i16(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3291,13 +3237,12 @@ entry:
 define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vluxseg4ei32.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3325,9 +3270,9 @@ define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3358,11 +3303,10 @@ define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg4.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3390,9 +3334,9 @@ define <vscale x 4 x i8> @test_vluxseg4_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3423,10 +3367,10 @@ define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -3457,10 +3401,10 @@ define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3491,12 +3435,11 @@ define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3524,10 +3467,10 @@ define <vscale x 4 x i8> @test_vluxseg5_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3558,11 +3501,11 @@ define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -3593,11 +3536,11 @@ define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3627,15 +3570,14 @@ entry:
 define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vluxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
+; CHECK-NEXT:    vluxseg6ei64.v v7, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>,<vscale x 4 x i8>} @llvm.riscv.vluxseg6.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -3663,11 +3605,11 @@ define <vscale x 4 x i8> @test_vluxseg6_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3698,12 +3640,12 @@ define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -3734,12 +3676,12 @@ define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3770,12 +3712,12 @@ define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -3806,12 +3748,12 @@ define <vscale x 4 x i8> @test_vluxseg7_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3842,13 +3784,13 @@ define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -3879,13 +3821,13 @@ define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %va
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3916,13 +3858,13 @@ define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -3953,13 +3895,13 @@ define <vscale x 4 x i8> @test_vluxseg8_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -3992,7 +3934,6 @@ define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4022,7 +3963,6 @@ define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4052,7 +3992,6 @@ define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4082,7 +4021,6 @@ define <vscale x 1 x i16> @test_vluxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4109,12 +4047,11 @@ entry:
 define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i64(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4141,12 +4078,11 @@ entry:
 define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i32(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4173,12 +4109,11 @@ entry:
 define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i16(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4205,12 +4140,11 @@ entry:
 define <vscale x 1 x i16> @test_vluxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i16>,<vscale x 1 x i16>,<vscale x 1 x i16>} @llvm.riscv.vluxseg3.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -4238,9 +4172,9 @@ define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4271,9 +4205,9 @@ define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4304,9 +4238,9 @@ define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4337,9 +4271,9 @@ define <vscale x 1 x i16> @test_vluxseg4_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4370,10 +4304,10 @@ define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4404,10 +4338,10 @@ define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4438,10 +4372,10 @@ define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4472,10 +4406,10 @@ define <vscale x 1 x i16> @test_vluxseg5_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4506,11 +4440,11 @@ define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4541,11 +4475,11 @@ define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4576,11 +4510,11 @@ define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4611,11 +4545,11 @@ define <vscale x 1 x i16> @test_vluxseg6_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4646,12 +4580,12 @@ define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4682,12 +4616,12 @@ define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4718,12 +4652,12 @@ define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4754,12 +4688,12 @@ define <vscale x 1 x i16> @test_vluxseg7_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4790,13 +4724,13 @@ define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i64(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4827,13 +4761,13 @@ define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i32(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4864,13 +4798,13 @@ define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i16(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4901,13 +4835,13 @@ define <vscale x 1 x i16> @test_vluxseg8_mask_nxv1i16_nxv1i8(<vscale x 1 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -4940,7 +4874,6 @@ define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -4970,7 +4903,6 @@ define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5000,7 +4932,6 @@ define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5030,7 +4961,6 @@ define <vscale x 2 x i32> @test_vluxseg2_mask_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg2.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5057,12 +4987,11 @@ entry:
 define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i32(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5089,12 +5018,11 @@ entry:
 define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i8(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5121,12 +5049,11 @@ entry:
 define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i16(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5154,10 +5081,9 @@ define <vscale x 2 x i32> @test_vluxseg3_mask_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5185,9 +5111,9 @@ define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5218,9 +5144,9 @@ define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5251,9 +5177,9 @@ define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5283,13 +5209,12 @@ entry:
 define <vscale x 2 x i32> @test_vluxseg4_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vluxseg4ei64.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>,<vscale x 2 x i32>} @llvm.riscv.vluxseg4.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -5317,10 +5242,10 @@ define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5351,10 +5276,10 @@ define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5385,10 +5310,10 @@ define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5419,10 +5344,10 @@ define <vscale x 2 x i32> @test_vluxseg5_mask_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -5453,11 +5378,11 @@ define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5488,11 +5413,11 @@ define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5523,11 +5448,11 @@ define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5558,11 +5483,11 @@ define <vscale x 2 x i32> @test_vluxseg6_mask_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -5593,12 +5518,12 @@ define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5629,12 +5554,12 @@ define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5665,12 +5590,12 @@ define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5701,12 +5626,12 @@ define <vscale x 2 x i32> @test_vluxseg7_mask_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -5737,13 +5662,13 @@ define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i32(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5774,13 +5699,13 @@ define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i8(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5811,13 +5736,13 @@ define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i16(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -5848,13 +5773,13 @@ define <vscale x 2 x i32> @test_vluxseg8_mask_nxv2i32_nxv2i64(<vscale x 2 x i32>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -5887,7 +5812,6 @@ define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -5917,7 +5841,6 @@ define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -5947,7 +5870,6 @@ define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -5977,7 +5899,6 @@ define <vscale x 8 x i8> @test_vluxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6005,10 +5926,9 @@ define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6035,12 +5955,11 @@ entry:
 define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i8(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6068,10 +5987,9 @@ define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6099,10 +6017,9 @@ define <vscale x 8 x i8> @test_vluxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6129,13 +6046,12 @@ entry:
 define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vluxseg4ei16.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6163,9 +6079,9 @@ define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6196,11 +6112,10 @@ define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6228,11 +6143,10 @@ define <vscale x 8 x i8> @test_vluxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6260,10 +6174,10 @@ define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -6294,10 +6208,10 @@ define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6328,12 +6242,11 @@ define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6361,12 +6274,11 @@ define <vscale x 8 x i8> @test_vluxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6394,11 +6306,11 @@ define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -6429,11 +6341,11 @@ define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6464,13 +6376,12 @@ define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6497,15 +6408,14 @@ entry:
 define <vscale x 8 x i8> @test_vluxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
+; CHECK-NEXT:    vluxseg6ei32.v v7, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg6.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6533,12 +6443,12 @@ define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -6569,12 +6479,12 @@ define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6605,14 +6515,13 @@ define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg7.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6640,12 +6549,12 @@ define <vscale x 8 x i8> @test_vluxseg7_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -6676,13 +6585,13 @@ define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -6713,13 +6622,13 @@ define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %va
 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -6750,15 +6659,14 @@ define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
-; CHECK-NEXT:    vmv1r.v v12, v7
-; CHECK-NEXT:    vmv1r.v v13, v7
-; CHECK-NEXT:    vmv1r.v v14, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v7, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>,<vscale x 8 x i8>} @llvm.riscv.vluxseg8.mask.nxv8i8.nxv8i64(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -6786,13 +6694,13 @@ define <vscale x 8 x i8> @test_vluxseg8_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -6825,7 +6733,6 @@ define <vscale x 4 x i64> @test_vluxseg2_mask_nxv4i64_nxv4i32(<vscale x 4 x i64>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i32(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -6855,7 +6762,6 @@ define <vscale x 4 x i64> @test_vluxseg2_mask_nxv4i64_nxv4i8(<vscale x 4 x i64>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i8(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -6885,7 +6791,6 @@ define <vscale x 4 x i64> @test_vluxseg2_mask_nxv4i64_nxv4i64(<vscale x 4 x i64>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i64(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -6915,7 +6820,6 @@ define <vscale x 4 x i64> @test_vluxseg2_mask_nxv4i64_nxv4i16(<vscale x 4 x i64>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i64>,<vscale x 4 x i64>} @llvm.riscv.vluxseg2.mask.nxv4i64.nxv4i16(<vscale x 4 x i64> %val,<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -6945,7 +6849,6 @@ define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -6975,7 +6878,6 @@ define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7005,7 +6907,6 @@ define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7035,7 +6936,6 @@ define <vscale x 4 x i16> @test_vluxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7063,10 +6963,9 @@ define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7093,12 +6992,11 @@ entry:
 define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i8(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7126,10 +7024,9 @@ define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7156,12 +7053,11 @@ entry:
 define <vscale x 4 x i16> @test_vluxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg3.mask.nxv4i16.nxv4i16(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7188,13 +7084,12 @@ entry:
 define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vluxseg4ei32.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7222,9 +7117,9 @@ define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7255,11 +7150,10 @@ define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg4.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7287,9 +7181,9 @@ define <vscale x 4 x i16> @test_vluxseg4_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7320,10 +7214,10 @@ define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -7354,10 +7248,10 @@ define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7388,12 +7282,11 @@ define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7421,10 +7314,10 @@ define <vscale x 4 x i16> @test_vluxseg5_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7455,11 +7348,11 @@ define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -7490,11 +7383,11 @@ define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7524,15 +7417,14 @@ entry:
 define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
+; CHECK-NEXT:    vluxseg6ei64.v v7, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>,<vscale x 4 x i16>} @llvm.riscv.vluxseg6.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -7560,11 +7452,11 @@ define <vscale x 4 x i16> @test_vluxseg6_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7595,12 +7487,12 @@ define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -7631,12 +7523,12 @@ define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7667,12 +7559,12 @@ define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -7703,12 +7595,12 @@ define <vscale x 4 x i16> @test_vluxseg7_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7739,13 +7631,13 @@ define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i32(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -7776,13 +7668,13 @@ define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i8(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7813,13 +7705,13 @@ define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i64(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -7850,13 +7742,13 @@ define <vscale x 4 x i16> @test_vluxseg8_mask_nxv4i16_nxv4i16(<vscale x 4 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -7889,7 +7781,6 @@ define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -7919,7 +7810,6 @@ define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -7949,7 +7839,6 @@ define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -7979,7 +7868,6 @@ define <vscale x 1 x i8> @test_vluxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -8006,12 +7894,11 @@ entry:
 define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i64(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -8038,12 +7925,11 @@ entry:
 define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i32(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -8070,12 +7956,11 @@ entry:
 define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i16(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -8102,12 +7987,11 @@ entry:
 define <vscale x 1 x i8> @test_vluxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x i8>,<vscale x 1 x i8>,<vscale x 1 x i8>} @llvm.riscv.vluxseg3.mask.nxv1i8.nxv1i8(<vscale x 1 x i8> %val,<vscale x 1 x i8> %val,<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -8135,9 +8019,9 @@ define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8168,9 +8052,9 @@ define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8201,9 +8085,9 @@ define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8234,9 +8118,9 @@ define <vscale x 1 x i8> @test_vluxseg4_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8267,10 +8151,10 @@ define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8301,10 +8185,10 @@ define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8335,10 +8219,10 @@ define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8369,10 +8253,10 @@ define <vscale x 1 x i8> @test_vluxseg5_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8403,11 +8287,11 @@ define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8438,11 +8322,11 @@ define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8473,11 +8357,11 @@ define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8508,11 +8392,11 @@ define <vscale x 1 x i8> @test_vluxseg6_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8543,12 +8427,12 @@ define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8579,12 +8463,12 @@ define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8615,12 +8499,12 @@ define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8651,12 +8535,12 @@ define <vscale x 1 x i8> @test_vluxseg7_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8687,13 +8571,13 @@ define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8724,13 +8608,13 @@ define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8761,14 +8645,14 @@ define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
 ; CHECK-NEXT:    ret
@@ -8798,13 +8682,13 @@ define <vscale x 1 x i8> @test_vluxseg8_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %va
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -8837,7 +8721,6 @@ define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -8867,7 +8750,6 @@ define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -8897,7 +8779,6 @@ define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -8927,7 +8808,6 @@ define <vscale x 2 x i8> @test_vluxseg2_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %v
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg2.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -8954,12 +8834,11 @@ entry:
 define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i32(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -8986,12 +8865,11 @@ entry:
 define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i8(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -9018,12 +8896,11 @@ entry:
 define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i16(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -9051,10 +8928,9 @@ define <vscale x 2 x i8> @test_vluxseg3_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -9082,9 +8958,9 @@ define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9115,9 +8991,9 @@ define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9148,9 +9024,9 @@ define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9180,13 +9056,12 @@ entry:
 define <vscale x 2 x i8> @test_vluxseg4_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vluxseg4ei64.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>,<vscale x 2 x i8>} @llvm.riscv.vluxseg4.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -9214,10 +9089,10 @@ define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9248,10 +9123,10 @@ define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9282,10 +9157,10 @@ define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9316,10 +9191,10 @@ define <vscale x 2 x i8> @test_vluxseg5_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -9350,11 +9225,11 @@ define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9385,11 +9260,11 @@ define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9420,11 +9295,11 @@ define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9455,11 +9330,11 @@ define <vscale x 2 x i8> @test_vluxseg6_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -9490,12 +9365,12 @@ define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9526,12 +9401,12 @@ define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9562,12 +9437,12 @@ define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9598,12 +9473,12 @@ define <vscale x 2 x i8> @test_vluxseg7_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -9634,13 +9509,13 @@ define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9671,13 +9546,13 @@ define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %va
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9708,13 +9583,13 @@ define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -9745,13 +9620,13 @@ define <vscale x 2 x i8> @test_vluxseg8_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %v
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -9784,7 +9659,6 @@ define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -9814,7 +9688,6 @@ define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -9844,7 +9717,6 @@ define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i64(<vscale x 8 x i32>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i64(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -9874,7 +9746,6 @@ define <vscale x 8 x i32> @test_vluxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x i32>,<vscale x 8 x i32>} @llvm.riscv.vluxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32> %val,<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -9904,7 +9775,6 @@ define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
@@ -9934,7 +9804,6 @@ define <vscale x 32 x i8> @test_vluxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8>
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 32 x i8>,<vscale x 32 x i8>} @llvm.riscv.vluxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8> %val,<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl, i64 1)
@@ -9964,7 +9833,6 @@ define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -9994,7 +9862,6 @@ define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10024,7 +9891,6 @@ define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10054,7 +9920,6 @@ define <vscale x 2 x i16> @test_vluxseg2_mask_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10081,12 +9946,11 @@ entry:
 define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i32(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10113,12 +9977,11 @@ entry:
 define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i8(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10145,12 +10008,11 @@ entry:
 define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i16(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10178,10 +10040,9 @@ define <vscale x 2 x i16> @test_vluxseg3_mask_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10209,9 +10070,9 @@ define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10242,9 +10103,9 @@ define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10275,9 +10136,9 @@ define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10307,13 +10168,12 @@ entry:
 define <vscale x 2 x i16> @test_vluxseg4_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vluxseg4ei64.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>,<vscale x 2 x i16>} @llvm.riscv.vluxseg4.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10341,10 +10201,10 @@ define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10375,10 +10235,10 @@ define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10409,10 +10269,10 @@ define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10443,10 +10303,10 @@ define <vscale x 2 x i16> @test_vluxseg5_mask_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -10477,11 +10337,11 @@ define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10512,11 +10372,11 @@ define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10547,11 +10407,11 @@ define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10582,11 +10442,11 @@ define <vscale x 2 x i16> @test_vluxseg6_mask_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -10617,12 +10477,12 @@ define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10653,12 +10513,12 @@ define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10689,12 +10549,12 @@ define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10725,12 +10585,12 @@ define <vscale x 2 x i16> @test_vluxseg7_mask_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -10761,13 +10621,13 @@ define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i32(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10798,13 +10658,13 @@ define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i8(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10835,13 +10695,13 @@ define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i16(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -10872,13 +10732,13 @@ define <vscale x 2 x i16> @test_vluxseg8_mask_nxv2i16_nxv2i64(<vscale x 2 x i16>
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -10911,7 +10771,6 @@ define <vscale x 2 x i64> @test_vluxseg2_mask_nxv2i64_nxv2i32(<vscale x 2 x i64>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10941,7 +10800,6 @@ define <vscale x 2 x i64> @test_vluxseg2_mask_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -10971,7 +10829,6 @@ define <vscale x 2 x i64> @test_vluxseg2_mask_nxv2i64_nxv2i16(<vscale x 2 x i64>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -11001,7 +10858,6 @@ define <vscale x 2 x i64> @test_vluxseg2_mask_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg2.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -11028,12 +10884,11 @@ entry:
 define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i32(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -11060,12 +10915,11 @@ entry:
 define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i8(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -11092,12 +10946,11 @@ entry:
 define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i16(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -11124,12 +10977,11 @@ entry:
 define <vscale x 2 x i64> @test_vluxseg3_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei64.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x i64>,<vscale x 2 x i64>,<vscale x 2 x i64>} @llvm.riscv.vluxseg3.mask.nxv2i64.nxv2i64(<vscale x 2 x i64> %val,<vscale x 2 x i64> %val,<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -11157,9 +11009,9 @@ define <vscale x 2 x i64> @test_vluxseg4_mask_nxv2i64_nxv2i32(<vscale x 2 x i64>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -11190,9 +11042,9 @@ define <vscale x 2 x i64> @test_vluxseg4_mask_nxv2i64_nxv2i8(<vscale x 2 x i64>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -11223,9 +11075,9 @@ define <vscale x 2 x i64> @test_vluxseg4_mask_nxv2i64_nxv2i16(<vscale x 2 x i64>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -11256,9 +11108,9 @@ define <vscale x 2 x i64> @test_vluxseg4_mask_nxv2i64_nxv2i64(<vscale x 2 x i64>
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -11291,7 +11143,6 @@ define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -11321,7 +11172,6 @@ define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -11351,7 +11201,6 @@ define <vscale x 16 x half> @test_vluxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x half>,<vscale x 16 x half>} @llvm.riscv.vluxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half> %val,<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl, i64 1)
@@ -11381,7 +11230,6 @@ define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x d
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -11411,7 +11259,6 @@ define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x do
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -11441,7 +11288,6 @@ define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i64(<vscale x 4 x d
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i64(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -11471,7 +11317,6 @@ define <vscale x 4 x double> @test_vluxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x d
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x double>,<vscale x 4 x double>} @llvm.riscv.vluxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double> %val,<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -11501,7 +11346,6 @@ define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i64(<vscale x 1 x d
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11531,7 +11375,6 @@ define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11561,7 +11404,6 @@ define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11591,7 +11433,6 @@ define <vscale x 1 x double> @test_vluxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11618,12 +11459,11 @@ entry:
 define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i64(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11650,12 +11490,11 @@ entry:
 define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i32(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11682,12 +11521,11 @@ entry:
 define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i16(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11714,12 +11552,11 @@ entry:
 define <vscale x 1 x double> @test_vluxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x double>,<vscale x 1 x double>,<vscale x 1 x double>} @llvm.riscv.vluxseg3.mask.nxv1f64.nxv1i8(<vscale x 1 x double> %val,<vscale x 1 x double> %val,<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -11747,9 +11584,9 @@ define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i64(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11780,9 +11617,9 @@ define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11813,9 +11650,9 @@ define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11846,9 +11683,9 @@ define <vscale x 1 x double> @test_vluxseg4_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11879,10 +11716,10 @@ define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i64(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11913,10 +11750,10 @@ define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11947,10 +11784,10 @@ define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -11981,10 +11818,10 @@ define <vscale x 1 x double> @test_vluxseg5_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12015,11 +11852,11 @@ define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i64(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12050,11 +11887,11 @@ define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12085,11 +11922,11 @@ define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12120,11 +11957,11 @@ define <vscale x 1 x double> @test_vluxseg6_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12155,12 +11992,12 @@ define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i64(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12191,12 +12028,12 @@ define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12227,12 +12064,12 @@ define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12263,12 +12100,12 @@ define <vscale x 1 x double> @test_vluxseg7_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12299,13 +12136,13 @@ define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i64(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12336,13 +12173,13 @@ define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i32(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12373,13 +12210,13 @@ define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i16(<vscale x 1 x d
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12410,13 +12247,13 @@ define <vscale x 1 x double> @test_vluxseg8_mask_nxv1f64_nxv1i8(<vscale x 1 x do
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12449,7 +12286,6 @@ define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12479,7 +12315,6 @@ define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12509,7 +12344,6 @@ define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12539,7 +12373,6 @@ define <vscale x 2 x float> @test_vluxseg2_mask_nxv2f32_nxv2i64(<vscale x 2 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg2.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12566,12 +12399,11 @@ entry:
 define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i32(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12598,12 +12430,11 @@ entry:
 define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i8(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12630,12 +12461,11 @@ entry:
 define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i16(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12663,10 +12493,9 @@ define <vscale x 2 x float> @test_vluxseg3_mask_nxv2f32_nxv2i64(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12694,9 +12523,9 @@ define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12727,9 +12556,9 @@ define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12760,9 +12589,9 @@ define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12792,13 +12621,12 @@ entry:
 define <vscale x 2 x float> @test_vluxseg4_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vluxseg4ei64.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>,<vscale x 2 x float>} @llvm.riscv.vluxseg4.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -12826,10 +12654,10 @@ define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12860,10 +12688,10 @@ define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12894,10 +12722,10 @@ define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12928,10 +12756,10 @@ define <vscale x 2 x float> @test_vluxseg5_mask_nxv2f32_nxv2i64(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -12962,11 +12790,11 @@ define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -12997,11 +12825,11 @@ define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13032,11 +12860,11 @@ define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13067,11 +12895,11 @@ define <vscale x 2 x float> @test_vluxseg6_mask_nxv2f32_nxv2i64(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -13102,12 +12930,12 @@ define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13138,12 +12966,12 @@ define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13174,12 +13002,12 @@ define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13210,12 +13038,12 @@ define <vscale x 2 x float> @test_vluxseg7_mask_nxv2f32_nxv2i64(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -13246,13 +13074,13 @@ define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i32(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13283,13 +13111,13 @@ define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i8(<vscale x 2 x flo
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13320,13 +13148,13 @@ define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i16(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13357,13 +13185,13 @@ define <vscale x 2 x float> @test_vluxseg8_mask_nxv2f32_nxv2i64(<vscale x 2 x fl
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -13396,7 +13224,6 @@ define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i64(<vscale x 1 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13426,7 +13253,6 @@ define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13456,7 +13282,6 @@ define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13486,7 +13311,6 @@ define <vscale x 1 x half> @test_vluxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13513,12 +13337,11 @@ entry:
 define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i64(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13545,12 +13368,11 @@ entry:
 define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i32(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13577,12 +13399,11 @@ entry:
 define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i16(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13609,12 +13430,11 @@ entry:
 define <vscale x 1 x half> @test_vluxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x half>,<vscale x 1 x half>,<vscale x 1 x half>} @llvm.riscv.vluxseg3.mask.nxv1f16.nxv1i8(<vscale x 1 x half> %val,<vscale x 1 x half> %val,<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -13642,9 +13462,9 @@ define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i64(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13675,9 +13495,9 @@ define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13708,9 +13528,9 @@ define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13741,9 +13561,9 @@ define <vscale x 1 x half> @test_vluxseg4_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13774,10 +13594,10 @@ define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i64(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13808,10 +13628,10 @@ define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13842,10 +13662,10 @@ define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13876,10 +13696,10 @@ define <vscale x 1 x half> @test_vluxseg5_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13910,11 +13730,11 @@ define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i64(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13945,11 +13765,11 @@ define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -13980,11 +13800,11 @@ define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14015,11 +13835,11 @@ define <vscale x 1 x half> @test_vluxseg6_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14050,12 +13870,12 @@ define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i64(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14086,12 +13906,12 @@ define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14122,12 +13942,12 @@ define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14158,12 +13978,12 @@ define <vscale x 1 x half> @test_vluxseg7_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14194,13 +14014,13 @@ define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i64(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14231,13 +14051,13 @@ define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i32(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14268,13 +14088,13 @@ define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i16(<vscale x 1 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14305,13 +14125,13 @@ define <vscale x 1 x half> @test_vluxseg8_mask_nxv1f16_nxv1i8(<vscale x 1 x half
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14344,7 +14164,6 @@ define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i64(<vscale x 1 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14374,7 +14193,6 @@ define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14404,7 +14222,6 @@ define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14434,7 +14251,6 @@ define <vscale x 1 x float> @test_vluxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14461,12 +14277,11 @@ entry:
 define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei64.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i64(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14493,12 +14308,11 @@ entry:
 define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i32(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14525,12 +14339,11 @@ entry:
 define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i16(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14557,12 +14370,11 @@ entry:
 define <vscale x 1 x float> @test_vluxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl, <vscale x 1 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 1 x float>,<vscale x 1 x float>,<vscale x 1 x float>} @llvm.riscv.vluxseg3.mask.nxv1f32.nxv1i8(<vscale x 1 x float> %val,<vscale x 1 x float> %val,<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl, i64 1)
@@ -14590,9 +14402,9 @@ define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i64(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14623,9 +14435,9 @@ define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14656,9 +14468,9 @@ define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14689,9 +14501,9 @@ define <vscale x 1 x float> @test_vluxseg4_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vluxseg4_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14722,10 +14534,10 @@ define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i64(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14756,10 +14568,10 @@ define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14790,10 +14602,10 @@ define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14824,10 +14636,10 @@ define <vscale x 1 x float> @test_vluxseg5_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vluxseg5_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14858,11 +14670,11 @@ define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i64(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14893,11 +14705,11 @@ define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14928,11 +14740,11 @@ define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14963,11 +14775,11 @@ define <vscale x 1 x float> @test_vluxseg6_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vluxseg6_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -14998,12 +14810,12 @@ define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i64(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15034,12 +14846,12 @@ define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15070,12 +14882,12 @@ define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15106,12 +14918,12 @@ define <vscale x 1 x float> @test_vluxseg7_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vluxseg7_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15142,13 +14954,13 @@ define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i64(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15179,13 +14991,13 @@ define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i32(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15216,13 +15028,13 @@ define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i16(<vscale x 1 x fl
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15253,13 +15065,13 @@ define <vscale x 1 x float> @test_vluxseg8_mask_nxv1f32_nxv1i8(<vscale x 1 x flo
 ; CHECK-LABEL: test_vluxseg8_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -15292,7 +15104,6 @@ define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x hal
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15322,7 +15133,6 @@ define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15352,7 +15162,6 @@ define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i64(<vscale x 8 x hal
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15382,7 +15191,6 @@ define <vscale x 8 x half> @test_vluxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x hal
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15409,12 +15217,11 @@ entry:
 define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i16(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15441,12 +15248,11 @@ entry:
 define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i8(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15474,10 +15280,9 @@ define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i64(<vscale x 8 x hal
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15505,10 +15310,9 @@ define <vscale x 8 x half> @test_vluxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x hal
 ; CHECK-LABEL: test_vluxseg3_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15536,9 +15340,9 @@ define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i16(<vscale x 8 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -15569,9 +15373,9 @@ define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i8(<vscale x 8 x half
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -15602,11 +15406,10 @@ define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i64(<vscale x 8 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
-; CHECK-NEXT:    vmv2r.v v12, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v6, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2_v12m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i64(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15633,13 +15436,12 @@ entry:
 define <vscale x 8 x half> @test_vluxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl, <vscale x 8 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vluxseg4ei32.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>,<vscale x 8 x half>} @llvm.riscv.vluxseg4.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15669,7 +15471,6 @@ define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x fl
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15699,7 +15500,6 @@ define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x flo
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15729,7 +15529,6 @@ define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i64(<vscale x 8 x fl
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v4, (a0), v16, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i64(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15759,7 +15558,6 @@ define <vscale x 8 x float> @test_vluxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x fl
 ; CHECK-NEXT:    vmv4r.v v4, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v4, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 8 x float>,<vscale x 8 x float>} @llvm.riscv.vluxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float> %val,<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl, i64 1)
@@ -15789,7 +15587,6 @@ define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x d
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -15819,7 +15616,6 @@ define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x do
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -15849,7 +15645,6 @@ define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x d
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -15879,7 +15674,6 @@ define <vscale x 2 x double> @test_vluxseg2_mask_nxv2f64_nxv2i64(<vscale x 2 x d
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg2.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -15906,12 +15700,11 @@ entry:
 define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i32(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -15938,12 +15731,11 @@ entry:
 define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i8(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -15970,12 +15762,11 @@ entry:
 define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i16(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -16002,12 +15793,11 @@ entry:
 define <vscale x 2 x double> @test_vluxseg3_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei64.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x double>,<vscale x 2 x double>,<vscale x 2 x double>} @llvm.riscv.vluxseg3.mask.nxv2f64.nxv2i64(<vscale x 2 x double> %val,<vscale x 2 x double> %val,<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -16035,9 +15825,9 @@ define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i32(<vscale x 2 x d
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -16068,9 +15858,9 @@ define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i8(<vscale x 2 x do
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -16101,9 +15891,9 @@ define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i16(<vscale x 2 x d
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -16134,9 +15924,9 @@ define <vscale x 2 x double> @test_vluxseg4_mask_nxv2f64_nxv2i64(<vscale x 2 x d
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -16169,7 +15959,6 @@ define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16199,7 +15988,6 @@ define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16229,7 +16017,6 @@ define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i64(<vscale x 4 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16259,7 +16046,6 @@ define <vscale x 4 x half> @test_vluxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16287,10 +16073,9 @@ define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16317,12 +16102,11 @@ entry:
 define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i8(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16350,10 +16134,9 @@ define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i64(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16380,12 +16163,11 @@ entry:
 define <vscale x 4 x half> @test_vluxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg3.mask.nxv4f16.nxv4i16(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16412,13 +16194,12 @@ entry:
 define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vluxseg4ei32.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16446,9 +16227,9 @@ define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16479,11 +16260,10 @@ define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i64(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg4.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16511,9 +16291,9 @@ define <vscale x 4 x half> @test_vluxseg4_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16544,10 +16324,10 @@ define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -16578,10 +16358,10 @@ define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16612,12 +16392,11 @@ define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i64(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
-; CHECK-NEXT:    vmv1r.v v10, v7
-; CHECK-NEXT:    vmv1r.v v11, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v7, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9_v10_v11
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16645,10 +16424,10 @@ define <vscale x 4 x half> @test_vluxseg5_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16679,11 +16458,11 @@ define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -16714,11 +16493,11 @@ define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16748,15 +16527,14 @@ entry:
 define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxseg6ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v17
+; CHECK-NEXT:    vluxseg6ei64.v v7, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>,<vscale x 4 x half>} @llvm.riscv.vluxseg6.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -16784,11 +16562,11 @@ define <vscale x 4 x half> @test_vluxseg6_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg6_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16819,12 +16597,12 @@ define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -16855,12 +16633,12 @@ define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16891,12 +16669,12 @@ define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i64(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -16927,12 +16705,12 @@ define <vscale x 4 x half> @test_vluxseg7_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -16963,13 +16741,13 @@ define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i32(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -17000,13 +16778,13 @@ define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i8(<vscale x 4 x half
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17037,13 +16815,13 @@ define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i64(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v17
@@ -17074,13 +16852,13 @@ define <vscale x 4 x half> @test_vluxseg8_mask_nxv4f16_nxv4i16(<vscale x 4 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17113,7 +16891,6 @@ define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17143,7 +16920,6 @@ define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17173,7 +16949,6 @@ define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v7, (a0), v9, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17203,7 +16978,6 @@ define <vscale x 2 x half> @test_vluxseg2_mask_nxv2f16_nxv2i64(<vscale x 2 x hal
 ; CHECK-NEXT:    vmv1r.v v7, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg2.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17230,12 +17004,11 @@ entry:
 define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei32.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i32(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17262,12 +17035,11 @@ entry:
 define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei8.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i8(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17294,12 +17066,11 @@ entry:
 define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v10, v9
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v11
+; CHECK-NEXT:    vluxseg3ei16.v v7, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i16(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17327,10 +17098,9 @@ define <vscale x 2 x half> @test_vluxseg3_mask_nxv2f16_nxv2i64(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg3_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v7, v8
-; CHECK-NEXT:    vmv1r.v v9, v7
+; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei64.v v7, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v7_v8_v9
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17358,9 +17128,9 @@ define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17391,9 +17161,9 @@ define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17424,9 +17194,9 @@ define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17456,13 +17226,12 @@ entry:
 define <vscale x 2 x half> @test_vluxseg4_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl, <vscale x 2 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v7, v8
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxseg4ei64.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv1r.v v8, v13
+; CHECK-NEXT:    vluxseg4ei64.v v7, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>,<vscale x 2 x half>} @llvm.riscv.vluxseg4.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl, i64 1)
@@ -17490,10 +17259,10 @@ define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17524,10 +17293,10 @@ define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17558,10 +17327,10 @@ define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17592,10 +17361,10 @@ define <vscale x 2 x half> @test_vluxseg5_mask_nxv2f16_nxv2i64(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg5_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -17626,11 +17395,11 @@ define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17661,11 +17430,11 @@ define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17696,11 +17465,11 @@ define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17731,11 +17500,11 @@ define <vscale x 2 x half> @test_vluxseg6_mask_nxv2f16_nxv2i64(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg6_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -17766,12 +17535,12 @@ define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17802,12 +17571,12 @@ define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17838,12 +17607,12 @@ define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17874,12 +17643,12 @@ define <vscale x 2 x half> @test_vluxseg7_mask_nxv2f16_nxv2i64(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg7_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -17910,13 +17679,13 @@ define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i32(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17947,13 +17716,13 @@ define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i8(<vscale x 2 x half
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -17984,13 +17753,13 @@ define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i16(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v11
@@ -18021,13 +17790,13 @@ define <vscale x 2 x half> @test_vluxseg8_mask_nxv2f16_nxv2i64(<vscale x 2 x hal
 ; CHECK-LABEL: test_vluxseg8_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
 ; CHECK-NEXT:    vluxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv1r.v v8, v13
@@ -18060,7 +17829,6 @@ define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x fl
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei32.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18090,7 +17858,6 @@ define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x flo
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei8.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18120,7 +17887,6 @@ define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i64(<vscale x 4 x fl
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18150,7 +17916,6 @@ define <vscale x 4 x float> @test_vluxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x fl
 ; CHECK-NEXT:    vmv2r.v v6, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg2ei16.v v6, (a0), v10, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18177,12 +17942,11 @@ entry:
 define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei32.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei32.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i32(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18209,12 +17973,11 @@ entry:
 define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei8.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei8.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i8(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18242,10 +18005,9 @@ define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i64(<vscale x 4 x fl
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v6, v8
-; CHECK-NEXT:    vmv2r.v v10, v6
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg3ei64.v v6, (a0), v12, v0.t
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v6m2_v8m2_v10m2
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18272,12 +18034,11 @@ entry:
 define <vscale x 4 x float> @test_vluxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg3_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg3ei16.v v12, (a0), v10, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v14
+; CHECK-NEXT:    vluxseg3ei16.v v6, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg3.mask.nxv4f32.nxv4i16(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18305,9 +18066,9 @@ define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i32(<vscale x 4 x fl
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -18338,9 +18099,9 @@ define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i8(<vscale x 4 x flo
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14
@@ -18370,13 +18131,12 @@ entry:
 define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl, <vscale x 4 x i1> %mask) {
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v6, v8
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxseg4ei64.v v16, (a0), v12, v0.t
-; CHECK-NEXT:    vmv2r.v v8, v18
+; CHECK-NEXT:    vluxseg4ei64.v v6, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>,<vscale x 4 x float>} @llvm.riscv.vluxseg4.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl, i64 1)
@@ -18404,9 +18164,9 @@ define <vscale x 4 x float> @test_vluxseg4_mask_nxv4f32_nxv4i16(<vscale x 4 x fl
 ; CHECK-LABEL: test_vluxseg4_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
 ; CHECK-NEXT:    vluxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    vmv2r.v v8, v14

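The pattern in the vluxseg hunks above is uniform: once each lane of a segment tuple is tracked as its own live range, the serialized copy chains (v11 copied from v10, v12 from v10, and so on) become independent vmv1r.v copies straight from v8, and the "# kill:" markers that kept the whole tuple alive disappear. Below is a hedged sketch of the opt-in mechanism, not the patch itself; TargetSubtargetInfo::enableSubRegLiveness() is the real upstream hook, while "ExampleSubtarget" is illustrative (a real target derives from its TableGen-generated <Target>GenSubtargetInfo class instead).

  #include "llvm/CodeGen/TargetSubtargetInfo.h"

  namespace {
  struct ExampleSubtarget : llvm::TargetSubtargetInfo {
    // With this returning true, LiveIntervals tracks each lane of a register
    // tuple (the v7_v8_v9_... segment tuples in the checks above) as its own
    // live range instead of keeping the whole tuple alive together.
    bool enableSubRegLiveness() const override { return true; }
  };
  } // namespace
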
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
index 199fc6fa62432..aa8850f8210e8 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vpgather-sdnode.ll
@@ -267,19 +267,18 @@ define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8
 ; RV32-NEXT:    vsetvli a5, zero, e8, mf2, ta, ma
 ; RV32-NEXT:    vslidedown.vx v0, v0, a3
 ; RV32-NEXT:    vsetvli a3, zero, e32, m8, ta, ma
-; RV32-NEXT:    vsext.vf4 v24, v10
+; RV32-NEXT:    vsext.vf4 v16, v10
 ; RV32-NEXT:    vsetvli zero, a4, e8, m2, ta, ma
-; RV32-NEXT:    vluxei32.v v18, (a0), v24, v0.t
+; RV32-NEXT:    vluxei32.v v10, (a0), v16, v0.t
 ; RV32-NEXT:    bltu a1, a2, .LBB12_2
 ; RV32-NEXT:  # %bb.1:
 ; RV32-NEXT:    mv a1, a2
 ; RV32-NEXT:  .LBB12_2:
 ; RV32-NEXT:    vsetvli a2, zero, e32, m8, ta, ma
-; RV32-NEXT:    vsext.vf4 v24, v8
+; RV32-NEXT:    vsext.vf4 v16, v8
 ; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; RV32-NEXT:    vmv1r.v v0, v12
-; RV32-NEXT:    vluxei32.v v16, (a0), v24, v0.t
-; RV32-NEXT:    vmv4r.v v8, v16
+; RV32-NEXT:    vluxei32.v v8, (a0), v16, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: vpgather_baseidx_nxv32i8:
@@ -290,7 +289,7 @@ define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8
 ; RV64-NEXT:    sltu a5, a1, a3
 ; RV64-NEXT:    addi a5, a5, -1
 ; RV64-NEXT:    and a3, a5, a3
-; RV64-NEXT:    vmv1r.v v17, v0
+; RV64-NEXT:    vmv1r.v v13, v0
 ; RV64-NEXT:    mv a5, a3
 ; RV64-NEXT:    bltu a3, a2, .LBB12_2
 ; RV64-NEXT:  # %bb.1:
@@ -298,12 +297,12 @@ define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8
 ; RV64-NEXT:  .LBB12_2:
 ; RV64-NEXT:    srli a6, a2, 2
 ; RV64-NEXT:    vsetvli a7, zero, e8, mf2, ta, ma
-; RV64-NEXT:    vslidedown.vx v16, v17, a6
+; RV64-NEXT:    vslidedown.vx v12, v13, a6
 ; RV64-NEXT:    vsetvli a6, zero, e64, m8, ta, ma
-; RV64-NEXT:    vsext.vf8 v24, v10
+; RV64-NEXT:    vsext.vf8 v16, v10
 ; RV64-NEXT:    vsetvli zero, a5, e8, m1, ta, ma
-; RV64-NEXT:    vmv1r.v v0, v16
-; RV64-NEXT:    vluxei64.v v14, (a0), v24, v0.t
+; RV64-NEXT:    vmv1r.v v0, v12
+; RV64-NEXT:    vluxei64.v v10, (a0), v16, v0.t
 ; RV64-NEXT:    bltu a1, a4, .LBB12_4
 ; RV64-NEXT:  # %bb.3:
 ; RV64-NEXT:    mv a1, a4
@@ -314,31 +313,30 @@ define <vscale x 32 x i8> @vpgather_baseidx_nxv32i8(ptr %base, <vscale x 32 x i8
 ; RV64-NEXT:    and a5, a5, a4
 ; RV64-NEXT:    srli a4, a2, 3
 ; RV64-NEXT:    vsetvli a6, zero, e8, mf4, ta, ma
-; RV64-NEXT:    vslidedown.vx v0, v17, a4
+; RV64-NEXT:    vslidedown.vx v0, v13, a4
 ; RV64-NEXT:    vsetvli a6, zero, e64, m8, ta, ma
-; RV64-NEXT:    vsext.vf8 v24, v9
+; RV64-NEXT:    vsext.vf8 v16, v9
 ; RV64-NEXT:    vsetvli zero, a5, e8, m1, ta, ma
-; RV64-NEXT:    vluxei64.v v13, (a0), v24, v0.t
+; RV64-NEXT:    vluxei64.v v9, (a0), v16, v0.t
 ; RV64-NEXT:    bltu a1, a2, .LBB12_6
 ; RV64-NEXT:  # %bb.5:
 ; RV64-NEXT:    mv a1, a2
 ; RV64-NEXT:  .LBB12_6:
 ; RV64-NEXT:    vsetvli a5, zero, e64, m8, ta, ma
-; RV64-NEXT:    vsext.vf8 v24, v8
+; RV64-NEXT:    vsext.vf8 v16, v8
 ; RV64-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; RV64-NEXT:    vmv1r.v v0, v17
-; RV64-NEXT:    vluxei64.v v12, (a0), v24, v0.t
+; RV64-NEXT:    vmv1r.v v0, v13
+; RV64-NEXT:    vluxei64.v v8, (a0), v16, v0.t
 ; RV64-NEXT:    sub a1, a3, a2
 ; RV64-NEXT:    sltu a2, a3, a1
 ; RV64-NEXT:    addi a2, a2, -1
 ; RV64-NEXT:    and a1, a2, a1
 ; RV64-NEXT:    vsetvli a2, zero, e8, mf4, ta, ma
-; RV64-NEXT:    vslidedown.vx v0, v16, a4
+; RV64-NEXT:    vslidedown.vx v0, v12, a4
 ; RV64-NEXT:    vsetvli a2, zero, e64, m8, ta, ma
 ; RV64-NEXT:    vsext.vf8 v16, v11
 ; RV64-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; RV64-NEXT:    vluxei64.v v15, (a0), v16, v0.t
-; RV64-NEXT:    vmv4r.v v8, v12
+; RV64-NEXT:    vluxei64.v v11, (a0), v16, v0.t
 ; RV64-NEXT:    ret
   %ptrs = getelementptr inbounds i8, ptr %base, <vscale x 32 x i8> %idxs
   %v = call <vscale x 32 x i8> @llvm.vp.gather.nxv32i8.nxv32p0(<vscale x 32 x ptr> %ptrs, <vscale x 32 x i1> %m, i32 %evl)

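In the vpgather test just above, the same per-lane tracking removes the trailing whole-group moves: each partial gather can be written directly into v8..v11 while the other lanes of the group are still live, so the final vmv4r.v v8, v16 (RV32) and vmv4r.v v8, v12 (RV64) copies drop out. A small sketch of where that decision is consumed; both calls are real LLVM APIs, but the helper itself is made up for illustration:

  #include "llvm/CodeGen/MachineFunction.h"
  #include "llvm/CodeGen/MachineRegisterInfo.h"

  // Illustrative helper only: MachineRegisterInfo records whether the current
  // function tracks subregister liveness (seeded from the subtarget hook
  // sketched earlier), and codegen passes query it before reasoning about
  // individual lanes of a register group.
  static bool lanesTrackedSeparately(const llvm::MachineFunction &MF) {
    return MF.getRegInfo().subRegLivenessEnabled();
  }
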
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
index 09d926787d522..0c0a3dc9675b1 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vrgatherei16-subreg-liveness.ll
@@ -12,7 +12,6 @@
 define internal void @foo(<vscale x 1 x i16> %v15, <vscale x 1 x i16> %0, <vscale x 1 x i16> %vs12.i.i.i, <vscale x 1 x i16> %1, <vscale x 8 x i8> %v37) {
 ; NOSUBREG-LABEL: foo:
 ; NOSUBREG:       # %bb.0: # %loopIR.preheader.i.i
-; NOSUBREG-NEXT:    # kill: def $v10 killed $v10 def $v10m2
 ; NOSUBREG-NEXT:    vsetvli a0, zero, e16, m2, ta, ma
 ; NOSUBREG-NEXT:    vmv.v.i v14, 0
 ; NOSUBREG-NEXT:    vsetvli zero, zero, e8, m1, ta, ma

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
index f1b84da0351a9..c0fa9042871c5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.ll
@@ -91,11 +91,13 @@ define <vscale x 1 x double> @test3(i64 %avl, i8 zeroext %cond, <vscale x 1 x do
 ; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfadd.vv v9, v8, v9
 ; CHECK-NEXT:    vfmul.vv v8, v9, v8
+; CHECK-NEXT:    # implicit-def: $x10
 ; CHECK-NEXT:    ret
 ; CHECK-NEXT:  .LBB2_2: # %if.else
 ; CHECK-NEXT:    vsetvli a0, a0, e64, m1, ta, ma
 ; CHECK-NEXT:    vfsub.vv v9, v8, v9
 ; CHECK-NEXT:    vfmul.vv v8, v9, v8
+; CHECK-NEXT:    # implicit-def: $x10
 ; CHECK-NEXT:    ret
 entry:
   %tobool = icmp eq i8 %cond, 0

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
index 10e41682034e2..0cfb36037221c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsitofp-vp.ll
@@ -306,15 +306,7 @@ declare <vscale x 32 x half> @llvm.vp.sitofp.nxv32f16.nxv32i32(<vscale x 32 x i3
 define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vsitofp_nxv32f16_nxv32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
 ; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    srli a2, a1, 2
 ; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
@@ -325,20 +317,15 @@ define <vscale x 32 x half> @vsitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvt.f.x.w v12, v16, v0.t
+; CHECK-NEXT:    vfncvt.f.x.w v28, v16, v0.t
 ; CHECK-NEXT:    bltu a0, a1, .LBB25_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a1
 ; CHECK-NEXT:  .LBB25_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vfncvt.f.x.w v8, v16, v0.t
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vfncvt.f.x.w v24, v8, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.sitofp.nxv32f16.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v

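The vsitofp hunk above shows the biggest payoff: the unknown-size folded spill and reload of a full m8 group, together with the vlenb-scaled frame setup around it, disappears because register pressure is now computed from the lanes that are actually live rather than from whole groups. LLVM represents per-lane liveness as a LaneBitmask; here is a toy predicate under that assumption (LaneBitmask is the real type, the helper and its use are made up):

  #include "llvm/MC/LaneBitmask.h"

  // Toy illustration: a register group only contributes its full size to
  // pressure when every lane in it is live at once. When only some lanes are
  // live, the free lanes can hold other values, which is what lets the test
  // above keep everything in registers instead of spilling to the stack.
  static bool wholeGroupLive(llvm::LaneBitmask LiveLanes,
                             llvm::LaneBitmask GroupLanes) {
    return (LiveLanes & GroupLanes) == GroupLanes;
  }
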
diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll
index da16846c7d673..076196c7bb665 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv32.ll
@@ -8,7 +8,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vs
 define void @test_vsoxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -22,7 +21,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -39,7 +37,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vsc
 define void @test_vsoxseg2_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -53,7 +50,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -70,7 +66,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vs
 define void @test_vsoxseg2_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16
@@ -83,7 +78,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16, v0.t
@@ -99,7 +93,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x
 define void @test_vsoxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -113,7 +106,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -130,7 +122,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale
 define void @test_vsoxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -144,7 +135,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -161,7 +151,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale
 define void @test_vsoxseg2_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -175,7 +164,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -193,8 +181,8 @@ define void @test_vsoxseg3_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -207,8 +195,8 @@ define void @test_vsoxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -224,8 +212,8 @@ define void @test_vsoxseg3_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -238,8 +226,8 @@ define void @test_vsoxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -255,8 +243,8 @@ define void @test_vsoxseg3_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -269,8 +257,8 @@ define void @test_vsoxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -286,9 +274,9 @@ define void @test_vsoxseg4_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -301,9 +289,9 @@ define void @test_vsoxseg4_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -319,9 +307,9 @@ define void @test_vsoxseg4_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -334,9 +322,9 @@ define void @test_vsoxseg4_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -352,9 +340,9 @@ define void @test_vsoxseg4_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -367,9 +355,9 @@ define void @test_vsoxseg4_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -385,10 +373,10 @@ define void @test_vsoxseg5_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -401,10 +389,10 @@ define void @test_vsoxseg5_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -420,10 +408,10 @@ define void @test_vsoxseg5_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -436,10 +424,10 @@ define void @test_vsoxseg5_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -455,10 +443,10 @@ define void @test_vsoxseg5_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -471,10 +459,10 @@ define void @test_vsoxseg5_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -490,11 +478,11 @@ define void @test_vsoxseg6_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -507,11 +495,11 @@ define void @test_vsoxseg6_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -527,11 +515,11 @@ define void @test_vsoxseg6_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -544,11 +532,11 @@ define void @test_vsoxseg6_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -564,11 +552,11 @@ define void @test_vsoxseg6_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -581,11 +569,11 @@ define void @test_vsoxseg6_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -601,12 +589,12 @@ define void @test_vsoxseg7_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -619,12 +607,12 @@ define void @test_vsoxseg7_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -640,12 +628,12 @@ define void @test_vsoxseg7_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -658,12 +646,12 @@ define void @test_vsoxseg7_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -679,12 +667,12 @@ define void @test_vsoxseg7_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -697,12 +685,12 @@ define void @test_vsoxseg7_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -718,13 +706,13 @@ define void @test_vsoxseg8_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -737,13 +725,13 @@ define void @test_vsoxseg8_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -759,13 +747,13 @@ define void @test_vsoxseg8_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -778,13 +766,13 @@ define void @test_vsoxseg8_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -800,13 +788,13 @@ define void @test_vsoxseg8_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -819,13 +807,13 @@ define void @test_vsoxseg8_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -840,7 +828,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vsca
 define void @test_vsoxseg2_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v12
@@ -853,7 +840,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v12, v0.t
@@ -869,7 +855,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscal
 define void @test_vsoxseg2_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -883,7 +868,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -900,7 +884,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vsca
 define void @test_vsoxseg2_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16
@@ -913,7 +896,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16, v0.t
@@ -929,11 +911,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vsca
 define void @test_vsoxseg3_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei16.v v16, (a0), v12
+; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl)
@@ -943,11 +925,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei16.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
@@ -961,8 +943,8 @@ define void @test_vsoxseg3_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -975,8 +957,8 @@ define void @test_vsoxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -991,7 +973,6 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vsca
 define void @test_vsoxseg3_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1005,7 +986,6 @@ entry:
 define void @test_vsoxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1023,9 +1003,9 @@ define void @test_vsoxseg4_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -1038,9 +1018,9 @@ define void @test_vsoxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -1056,9 +1036,9 @@ define void @test_vsoxseg4_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -1071,9 +1051,9 @@ define void @test_vsoxseg4_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -1088,7 +1068,6 @@ declare void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vsca
 define void @test_vsoxseg4_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -1103,7 +1082,6 @@ entry:
 define void @test_vsoxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -1121,7 +1099,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscal
 define void @test_vsoxseg2_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1135,7 +1112,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1152,7 +1128,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale
 define void @test_vsoxseg2_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1166,7 +1141,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1183,7 +1157,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscal
 define void @test_vsoxseg2_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1197,7 +1170,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1215,8 +1187,8 @@ define void @test_vsoxseg3_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1229,8 +1201,8 @@ define void @test_vsoxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1246,8 +1218,8 @@ define void @test_vsoxseg3_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1260,8 +1232,8 @@ define void @test_vsoxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1277,8 +1249,8 @@ define void @test_vsoxseg3_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1291,8 +1263,8 @@ define void @test_vsoxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1308,9 +1280,9 @@ define void @test_vsoxseg4_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1323,9 +1295,9 @@ define void @test_vsoxseg4_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1341,9 +1313,9 @@ define void @test_vsoxseg4_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1356,9 +1328,9 @@ define void @test_vsoxseg4_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1374,9 +1346,9 @@ define void @test_vsoxseg4_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1389,9 +1361,9 @@ define void @test_vsoxseg4_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1407,10 +1379,10 @@ define void @test_vsoxseg5_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1423,10 +1395,10 @@ define void @test_vsoxseg5_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1442,10 +1414,10 @@ define void @test_vsoxseg5_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1458,10 +1430,10 @@ define void @test_vsoxseg5_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1477,10 +1449,10 @@ define void @test_vsoxseg5_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1493,10 +1465,10 @@ define void @test_vsoxseg5_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1512,11 +1484,11 @@ define void @test_vsoxseg6_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1529,11 +1501,11 @@ define void @test_vsoxseg6_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1549,11 +1521,11 @@ define void @test_vsoxseg6_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1566,11 +1538,11 @@ define void @test_vsoxseg6_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1586,11 +1558,11 @@ define void @test_vsoxseg6_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1603,11 +1575,11 @@ define void @test_vsoxseg6_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1623,12 +1595,12 @@ define void @test_vsoxseg7_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1641,12 +1613,12 @@ define void @test_vsoxseg7_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1662,12 +1634,12 @@ define void @test_vsoxseg7_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1680,12 +1652,12 @@ define void @test_vsoxseg7_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1701,12 +1673,12 @@ define void @test_vsoxseg7_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1719,12 +1691,12 @@ define void @test_vsoxseg7_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1740,13 +1712,13 @@ define void @test_vsoxseg8_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1759,13 +1731,13 @@ define void @test_vsoxseg8_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1781,13 +1753,13 @@ define void @test_vsoxseg8_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1800,13 +1772,13 @@ define void @test_vsoxseg8_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1822,13 +1794,13 @@ define void @test_vsoxseg8_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1841,13 +1813,13 @@ define void @test_vsoxseg8_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1862,7 +1834,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscal
 define void @test_vsoxseg2_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1876,7 +1847,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1893,7 +1863,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale
 define void @test_vsoxseg2_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1907,7 +1876,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1924,7 +1892,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscal
 define void @test_vsoxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
@@ -1937,7 +1904,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
@@ -1954,8 +1920,8 @@ define void @test_vsoxseg3_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1968,8 +1934,8 @@ define void @test_vsoxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1985,8 +1951,8 @@ define void @test_vsoxseg3_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1999,8 +1965,8 @@ define void @test_vsoxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2015,11 +1981,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscal
 define void @test_vsoxseg3_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
@@ -2029,11 +1995,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
@@ -2047,9 +2013,9 @@ define void @test_vsoxseg4_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2062,9 +2028,9 @@ define void @test_vsoxseg4_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2080,9 +2046,9 @@ define void @test_vsoxseg4_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2095,9 +2061,9 @@ define void @test_vsoxseg4_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2113,9 +2079,9 @@ define void @test_vsoxseg4_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2128,9 +2094,9 @@ define void @test_vsoxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -2146,10 +2112,10 @@ define void @test_vsoxseg5_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2162,10 +2128,10 @@ define void @test_vsoxseg5_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2181,10 +2147,10 @@ define void @test_vsoxseg5_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2197,10 +2163,10 @@ define void @test_vsoxseg5_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2216,10 +2182,10 @@ define void @test_vsoxseg5_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2232,10 +2198,10 @@ define void @test_vsoxseg5_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -2251,11 +2217,11 @@ define void @test_vsoxseg6_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2268,11 +2234,11 @@ define void @test_vsoxseg6_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2288,11 +2254,11 @@ define void @test_vsoxseg6_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2305,11 +2271,11 @@ define void @test_vsoxseg6_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2325,11 +2291,11 @@ define void @test_vsoxseg6_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2342,11 +2308,11 @@ define void @test_vsoxseg6_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -2362,12 +2328,12 @@ define void @test_vsoxseg7_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2380,12 +2346,12 @@ define void @test_vsoxseg7_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2401,12 +2367,12 @@ define void @test_vsoxseg7_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2419,12 +2385,12 @@ define void @test_vsoxseg7_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2440,12 +2406,12 @@ define void @test_vsoxseg7_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2458,12 +2424,12 @@ define void @test_vsoxseg7_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -2479,13 +2445,13 @@ define void @test_vsoxseg8_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2498,13 +2464,13 @@ define void @test_vsoxseg8_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2520,13 +2486,13 @@ define void @test_vsoxseg8_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2539,13 +2505,13 @@ define void @test_vsoxseg8_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2561,13 +2527,13 @@ define void @test_vsoxseg8_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2580,13 +2546,13 @@ define void @test_vsoxseg8_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -2601,7 +2567,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale
 define void @test_vsoxseg2_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2615,7 +2580,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2632,7 +2596,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscal
 define void @test_vsoxseg2_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2646,7 +2609,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2663,7 +2625,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscal
 define void @test_vsoxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2677,7 +2638,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2695,8 +2655,8 @@ define void @test_vsoxseg3_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2709,8 +2669,8 @@ define void @test_vsoxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2726,8 +2686,8 @@ define void @test_vsoxseg3_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2740,8 +2700,8 @@ define void @test_vsoxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2757,8 +2717,8 @@ define void @test_vsoxseg3_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2771,8 +2731,8 @@ define void @test_vsoxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2788,9 +2748,9 @@ define void @test_vsoxseg4_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2803,9 +2763,9 @@ define void @test_vsoxseg4_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2821,9 +2781,9 @@ define void @test_vsoxseg4_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2836,9 +2796,9 @@ define void @test_vsoxseg4_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2854,9 +2814,9 @@ define void @test_vsoxseg4_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2869,9 +2829,9 @@ define void @test_vsoxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2887,10 +2847,10 @@ define void @test_vsoxseg5_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2903,10 +2863,10 @@ define void @test_vsoxseg5_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2922,10 +2882,10 @@ define void @test_vsoxseg5_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2938,10 +2898,10 @@ define void @test_vsoxseg5_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2957,10 +2917,10 @@ define void @test_vsoxseg5_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2973,10 +2933,10 @@ define void @test_vsoxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2992,11 +2952,11 @@ define void @test_vsoxseg6_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3009,11 +2969,11 @@ define void @test_vsoxseg6_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3029,11 +2989,11 @@ define void @test_vsoxseg6_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3046,11 +3006,11 @@ define void @test_vsoxseg6_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3066,11 +3026,11 @@ define void @test_vsoxseg6_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3083,11 +3043,11 @@ define void @test_vsoxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3103,12 +3063,12 @@ define void @test_vsoxseg7_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3121,12 +3081,12 @@ define void @test_vsoxseg7_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3142,12 +3102,12 @@ define void @test_vsoxseg7_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3160,12 +3120,12 @@ define void @test_vsoxseg7_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3181,12 +3141,12 @@ define void @test_vsoxseg7_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3199,12 +3159,12 @@ define void @test_vsoxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3220,13 +3180,13 @@ define void @test_vsoxseg8_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3239,13 +3199,13 @@ define void @test_vsoxseg8_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3261,13 +3221,13 @@ define void @test_vsoxseg8_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3280,13 +3240,13 @@ define void @test_vsoxseg8_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3302,13 +3262,13 @@ define void @test_vsoxseg8_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3321,13 +3281,13 @@ define void @test_vsoxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3342,7 +3302,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscal
 define void @test_vsoxseg2_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3356,7 +3315,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3373,7 +3331,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale
 define void @test_vsoxseg2_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3387,7 +3344,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3404,7 +3360,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscal
 define void @test_vsoxseg2_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12
@@ -3417,7 +3372,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12, v0.t
@@ -3434,8 +3388,8 @@ define void @test_vsoxseg3_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3448,8 +3402,8 @@ define void @test_vsoxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3465,8 +3419,8 @@ define void @test_vsoxseg3_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3479,8 +3433,8 @@ define void @test_vsoxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3495,11 +3449,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscal
 define void @test_vsoxseg3_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
@@ -3509,11 +3463,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
@@ -3527,10 +3481,10 @@ define void @test_vsoxseg4_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
 entry:
@@ -3542,9 +3496,9 @@ define void @test_vsoxseg4_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3560,9 +3514,9 @@ define void @test_vsoxseg4_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3575,9 +3529,9 @@ define void @test_vsoxseg4_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3593,9 +3547,9 @@ define void @test_vsoxseg4_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -3608,9 +3562,9 @@ define void @test_vsoxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -3625,7 +3579,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg2_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10
@@ -3638,7 +3591,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10, v0.t
@@ -3654,7 +3606,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x
 define void @test_vsoxseg2_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -3668,7 +3619,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -3685,7 +3635,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg2_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12
@@ -3698,7 +3647,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12, v0.t
@@ -3714,11 +3662,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg3_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10
+; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
@@ -3728,11 +3676,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
@@ -3746,8 +3694,8 @@ define void @test_vsoxseg3_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3760,8 +3708,8 @@ define void @test_vsoxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3776,7 +3724,6 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg3_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -3790,7 +3737,6 @@ entry:
 define void @test_vsoxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -3808,9 +3754,9 @@ define void @test_vsoxseg4_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3823,9 +3769,9 @@ define void @test_vsoxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3841,9 +3787,9 @@ define void @test_vsoxseg4_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3856,9 +3802,9 @@ define void @test_vsoxseg4_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3873,7 +3819,6 @@ declare void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg4_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3888,7 +3833,6 @@ entry:
 define void @test_vsoxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3907,10 +3851,10 @@ define void @test_vsoxseg5_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3923,10 +3867,10 @@ define void @test_vsoxseg5_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3942,10 +3886,10 @@ define void @test_vsoxseg5_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3958,10 +3902,10 @@ define void @test_vsoxseg5_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3976,13 +3920,13 @@ declare void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg5_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsoxseg5ei32.v v16, (a0), v12
+; CHECK-NEXT:    vsoxseg5ei32.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
@@ -3992,13 +3936,13 @@ entry:
 define void @test_vsoxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsoxseg5ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsoxseg5ei32.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
@@ -4012,11 +3956,11 @@ define void @test_vsoxseg6_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -4029,11 +3973,11 @@ define void @test_vsoxseg6_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -4049,11 +3993,11 @@ define void @test_vsoxseg6_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4066,11 +4010,11 @@ define void @test_vsoxseg6_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4086,11 +4030,11 @@ define void @test_vsoxseg6_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -4103,11 +4047,11 @@ define void @test_vsoxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -4123,12 +4067,12 @@ define void @test_vsoxseg7_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -4141,12 +4085,12 @@ define void @test_vsoxseg7_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -4162,12 +4106,12 @@ define void @test_vsoxseg7_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4180,12 +4124,12 @@ define void @test_vsoxseg7_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4201,12 +4145,12 @@ define void @test_vsoxseg7_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -4219,12 +4163,12 @@ define void @test_vsoxseg7_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -4240,13 +4184,13 @@ define void @test_vsoxseg8_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -4259,13 +4203,13 @@ define void @test_vsoxseg8_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -4281,13 +4225,13 @@ define void @test_vsoxseg8_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4300,13 +4244,13 @@ define void @test_vsoxseg8_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4322,13 +4266,13 @@ define void @test_vsoxseg8_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -4341,13 +4285,13 @@ define void @test_vsoxseg8_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -4362,7 +4306,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32>,<vscal
 define void @test_vsoxseg2_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4376,7 +4319,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4393,7 +4335,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32>,<vscale
 define void @test_vsoxseg2_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4407,7 +4348,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4424,7 +4364,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32>,<vscal
 define void @test_vsoxseg2_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4438,7 +4377,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4455,7 +4393,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale
 define void @test_vsoxseg2_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -4469,7 +4406,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -4486,7 +4422,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x
 define void @test_vsoxseg2_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -4500,7 +4435,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -4517,7 +4451,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale
 define void @test_vsoxseg2_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
@@ -4530,7 +4463,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
@@ -4547,8 +4479,8 @@ define void @test_vsoxseg3_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4561,8 +4493,8 @@ define void @test_vsoxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4578,8 +4510,8 @@ define void @test_vsoxseg3_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4592,8 +4524,8 @@ define void @test_vsoxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4608,11 +4540,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale
 define void @test_vsoxseg3_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
@@ -4622,11 +4554,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
@@ -4640,9 +4572,9 @@ define void @test_vsoxseg4_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4655,9 +4587,9 @@ define void @test_vsoxseg4_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4673,9 +4605,9 @@ define void @test_vsoxseg4_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4688,9 +4620,9 @@ define void @test_vsoxseg4_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4706,9 +4638,9 @@ define void @test_vsoxseg4_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -4721,9 +4653,9 @@ define void @test_vsoxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -4739,10 +4671,10 @@ define void @test_vsoxseg5_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4755,10 +4687,10 @@ define void @test_vsoxseg5_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4774,10 +4706,10 @@ define void @test_vsoxseg5_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4790,10 +4722,10 @@ define void @test_vsoxseg5_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4809,10 +4741,10 @@ define void @test_vsoxseg5_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -4825,10 +4757,10 @@ define void @test_vsoxseg5_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -4844,11 +4776,11 @@ define void @test_vsoxseg6_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4861,11 +4793,11 @@ define void @test_vsoxseg6_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4881,11 +4813,11 @@ define void @test_vsoxseg6_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4898,11 +4830,11 @@ define void @test_vsoxseg6_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4918,11 +4850,11 @@ define void @test_vsoxseg6_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -4935,11 +4867,11 @@ define void @test_vsoxseg6_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -4955,12 +4887,12 @@ define void @test_vsoxseg7_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4973,12 +4905,12 @@ define void @test_vsoxseg7_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4994,12 +4926,12 @@ define void @test_vsoxseg7_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5012,12 +4944,12 @@ define void @test_vsoxseg7_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5033,12 +4965,12 @@ define void @test_vsoxseg7_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -5051,12 +4983,12 @@ define void @test_vsoxseg7_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -5072,13 +5004,13 @@ define void @test_vsoxseg8_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5091,13 +5023,13 @@ define void @test_vsoxseg8_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5113,13 +5045,13 @@ define void @test_vsoxseg8_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5132,13 +5064,13 @@ define void @test_vsoxseg8_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5154,13 +5086,13 @@ define void @test_vsoxseg8_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -5173,13 +5105,13 @@ define void @test_vsoxseg8_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -5194,7 +5126,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale
 define void @test_vsoxseg2_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -5208,7 +5139,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -5225,7 +5155,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscal
 define void @test_vsoxseg2_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -5239,7 +5168,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -5256,7 +5184,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscal
 define void @test_vsoxseg2_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -5270,7 +5197,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -5288,8 +5214,8 @@ define void @test_vsoxseg3_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5302,8 +5228,8 @@ define void @test_vsoxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5319,8 +5245,8 @@ define void @test_vsoxseg3_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5333,8 +5259,8 @@ define void @test_vsoxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5350,8 +5276,8 @@ define void @test_vsoxseg3_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5364,8 +5290,8 @@ define void @test_vsoxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5381,9 +5307,9 @@ define void @test_vsoxseg4_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5396,9 +5322,9 @@ define void @test_vsoxseg4_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5414,9 +5340,9 @@ define void @test_vsoxseg4_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5429,9 +5355,9 @@ define void @test_vsoxseg4_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5447,9 +5373,9 @@ define void @test_vsoxseg4_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5462,9 +5388,9 @@ define void @test_vsoxseg4_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5480,10 +5406,10 @@ define void @test_vsoxseg5_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5496,10 +5422,10 @@ define void @test_vsoxseg5_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5515,10 +5441,10 @@ define void @test_vsoxseg5_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5531,10 +5457,10 @@ define void @test_vsoxseg5_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5550,10 +5476,10 @@ define void @test_vsoxseg5_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5566,10 +5492,10 @@ define void @test_vsoxseg5_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5585,11 +5511,11 @@ define void @test_vsoxseg6_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5602,11 +5528,11 @@ define void @test_vsoxseg6_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5622,11 +5548,11 @@ define void @test_vsoxseg6_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5639,11 +5565,11 @@ define void @test_vsoxseg6_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5659,11 +5585,11 @@ define void @test_vsoxseg6_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5676,11 +5602,11 @@ define void @test_vsoxseg6_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5696,12 +5622,12 @@ define void @test_vsoxseg7_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5714,12 +5640,12 @@ define void @test_vsoxseg7_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5735,12 +5661,12 @@ define void @test_vsoxseg7_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5753,12 +5679,12 @@ define void @test_vsoxseg7_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5774,12 +5700,12 @@ define void @test_vsoxseg7_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5792,12 +5718,12 @@ define void @test_vsoxseg7_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5813,13 +5739,13 @@ define void @test_vsoxseg8_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5832,13 +5758,13 @@ define void @test_vsoxseg8_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5854,13 +5780,13 @@ define void @test_vsoxseg8_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5873,13 +5799,13 @@ define void @test_vsoxseg8_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5895,13 +5821,13 @@ define void @test_vsoxseg8_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5914,13 +5840,13 @@ define void @test_vsoxseg8_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5935,7 +5861,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8>,<vsca
 define void @test_vsoxseg2_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v16
@@ -5948,7 +5873,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v16, v0.t
@@ -5964,7 +5888,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8>,<vscal
 define void @test_vsoxseg2_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -5978,7 +5901,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -5995,7 +5917,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale
 define void @test_vsoxseg2_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -6009,7 +5930,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -6026,7 +5946,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x
 define void @test_vsoxseg2_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -6040,7 +5959,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -6057,7 +5975,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale
 define void @test_vsoxseg2_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -6071,7 +5988,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -6089,8 +6005,8 @@ define void @test_vsoxseg3_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6103,8 +6019,8 @@ define void @test_vsoxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6120,8 +6036,8 @@ define void @test_vsoxseg3_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6134,8 +6050,8 @@ define void @test_vsoxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6151,8 +6067,8 @@ define void @test_vsoxseg3_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6165,8 +6081,8 @@ define void @test_vsoxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6182,9 +6098,9 @@ define void @test_vsoxseg4_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6197,9 +6113,9 @@ define void @test_vsoxseg4_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6215,9 +6131,9 @@ define void @test_vsoxseg4_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6230,9 +6146,9 @@ define void @test_vsoxseg4_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6248,9 +6164,9 @@ define void @test_vsoxseg4_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6263,9 +6179,9 @@ define void @test_vsoxseg4_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6281,10 +6197,10 @@ define void @test_vsoxseg5_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6297,10 +6213,10 @@ define void @test_vsoxseg5_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6316,10 +6232,10 @@ define void @test_vsoxseg5_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6332,10 +6248,10 @@ define void @test_vsoxseg5_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6351,10 +6267,10 @@ define void @test_vsoxseg5_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6367,10 +6283,10 @@ define void @test_vsoxseg5_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6386,11 +6302,11 @@ define void @test_vsoxseg6_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6403,11 +6319,11 @@ define void @test_vsoxseg6_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6423,11 +6339,11 @@ define void @test_vsoxseg6_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6440,11 +6356,11 @@ define void @test_vsoxseg6_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6460,11 +6376,11 @@ define void @test_vsoxseg6_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6477,11 +6393,11 @@ define void @test_vsoxseg6_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6497,12 +6413,12 @@ define void @test_vsoxseg7_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6515,12 +6431,12 @@ define void @test_vsoxseg7_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6536,12 +6452,12 @@ define void @test_vsoxseg7_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6554,12 +6470,12 @@ define void @test_vsoxseg7_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6575,12 +6491,12 @@ define void @test_vsoxseg7_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6593,12 +6509,12 @@ define void @test_vsoxseg7_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6614,13 +6530,13 @@ define void @test_vsoxseg8_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6633,13 +6549,13 @@ define void @test_vsoxseg8_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6655,13 +6571,13 @@ define void @test_vsoxseg8_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6674,13 +6590,13 @@ define void @test_vsoxseg8_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6696,13 +6612,13 @@ define void @test_vsoxseg8_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6715,13 +6631,13 @@ define void @test_vsoxseg8_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6736,7 +6652,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscal
 define void @test_vsoxseg2_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -6750,7 +6665,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -6767,7 +6681,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale
 define void @test_vsoxseg2_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -6781,7 +6694,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -6798,7 +6710,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscal
 define void @test_vsoxseg2_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -6812,7 +6723,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -6830,8 +6740,8 @@ define void @test_vsoxseg3_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6844,8 +6754,8 @@ define void @test_vsoxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6861,8 +6771,8 @@ define void @test_vsoxseg3_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6875,8 +6785,8 @@ define void @test_vsoxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6892,8 +6802,8 @@ define void @test_vsoxseg3_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6906,8 +6816,8 @@ define void @test_vsoxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6923,9 +6833,9 @@ define void @test_vsoxseg4_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6938,9 +6848,9 @@ define void @test_vsoxseg4_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6956,9 +6866,9 @@ define void @test_vsoxseg4_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6971,9 +6881,9 @@ define void @test_vsoxseg4_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6989,9 +6899,9 @@ define void @test_vsoxseg4_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7004,9 +6914,9 @@ define void @test_vsoxseg4_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7022,10 +6932,10 @@ define void @test_vsoxseg5_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7038,10 +6948,10 @@ define void @test_vsoxseg5_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7057,10 +6967,10 @@ define void @test_vsoxseg5_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7073,10 +6983,10 @@ define void @test_vsoxseg5_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7092,10 +7002,10 @@ define void @test_vsoxseg5_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7108,10 +7018,10 @@ define void @test_vsoxseg5_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7127,11 +7037,11 @@ define void @test_vsoxseg6_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7144,11 +7054,11 @@ define void @test_vsoxseg6_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7164,11 +7074,11 @@ define void @test_vsoxseg6_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7181,11 +7091,11 @@ define void @test_vsoxseg6_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7201,11 +7111,11 @@ define void @test_vsoxseg6_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7218,11 +7128,11 @@ define void @test_vsoxseg6_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7238,13 +7148,13 @@ define void @test_vsoxseg7_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
 entry:
@@ -7256,12 +7166,12 @@ define void @test_vsoxseg7_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7277,12 +7187,12 @@ define void @test_vsoxseg7_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7295,12 +7205,12 @@ define void @test_vsoxseg7_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7316,12 +7226,12 @@ define void @test_vsoxseg7_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7334,12 +7244,12 @@ define void @test_vsoxseg7_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7355,13 +7265,13 @@ define void @test_vsoxseg8_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7374,13 +7284,13 @@ define void @test_vsoxseg8_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7396,13 +7306,13 @@ define void @test_vsoxseg8_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7415,13 +7325,13 @@ define void @test_vsoxseg8_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7437,13 +7347,13 @@ define void @test_vsoxseg8_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7456,13 +7366,13 @@ define void @test_vsoxseg8_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7477,7 +7387,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscal
 define void @test_vsoxseg2_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -7491,7 +7400,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -7508,7 +7416,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale
 define void @test_vsoxseg2_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -7522,7 +7429,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -7539,7 +7445,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscal
 define void @test_vsoxseg2_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -7553,7 +7458,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -7571,8 +7475,8 @@ define void @test_vsoxseg3_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7585,8 +7489,8 @@ define void @test_vsoxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7602,8 +7506,8 @@ define void @test_vsoxseg3_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7616,8 +7520,8 @@ define void @test_vsoxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7633,8 +7537,8 @@ define void @test_vsoxseg3_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7647,8 +7551,8 @@ define void @test_vsoxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7664,9 +7568,9 @@ define void @test_vsoxseg4_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7679,9 +7583,9 @@ define void @test_vsoxseg4_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7697,9 +7601,9 @@ define void @test_vsoxseg4_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7712,9 +7616,9 @@ define void @test_vsoxseg4_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7730,9 +7634,9 @@ define void @test_vsoxseg4_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7745,9 +7649,9 @@ define void @test_vsoxseg4_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7762,7 +7666,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half>,<v
 define void @test_vsoxseg2_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -7776,7 +7679,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -7793,7 +7695,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half>,<vs
 define void @test_vsoxseg2_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -7807,7 +7708,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -7824,7 +7724,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half>,<v
 define void @test_vsoxseg2_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16
@@ -7837,7 +7736,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16, v0.t
@@ -7853,7 +7751,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double>,<vs
 define void @test_vsoxseg2_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7867,7 +7764,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7884,7 +7780,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double>,<vsc
 define void @test_vsoxseg2_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7898,7 +7793,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7915,7 +7809,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double>,<vs
 define void @test_vsoxseg2_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7929,7 +7822,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7946,7 +7838,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vsc
 define void @test_vsoxseg2_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -7960,7 +7851,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -7977,7 +7867,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vs
 define void @test_vsoxseg2_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -7991,7 +7880,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -8008,7 +7896,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vs
 define void @test_vsoxseg2_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -8022,7 +7909,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -8040,8 +7926,8 @@ define void @test_vsoxseg3_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8054,8 +7940,8 @@ define void @test_vsoxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8071,8 +7957,8 @@ define void @test_vsoxseg3_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8085,8 +7971,8 @@ define void @test_vsoxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8102,8 +7988,8 @@ define void @test_vsoxseg3_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8116,8 +8002,8 @@ define void @test_vsoxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8133,9 +8019,9 @@ define void @test_vsoxseg4_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8148,9 +8034,9 @@ define void @test_vsoxseg4_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8166,9 +8052,9 @@ define void @test_vsoxseg4_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8181,9 +8067,9 @@ define void @test_vsoxseg4_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8199,9 +8085,9 @@ define void @test_vsoxseg4_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8214,9 +8100,9 @@ define void @test_vsoxseg4_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8232,10 +8118,10 @@ define void @test_vsoxseg5_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8248,10 +8134,10 @@ define void @test_vsoxseg5_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8267,10 +8153,10 @@ define void @test_vsoxseg5_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8283,10 +8169,10 @@ define void @test_vsoxseg5_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8302,10 +8188,10 @@ define void @test_vsoxseg5_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8318,10 +8204,10 @@ define void @test_vsoxseg5_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8337,11 +8223,11 @@ define void @test_vsoxseg6_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8354,11 +8240,11 @@ define void @test_vsoxseg6_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8374,11 +8260,11 @@ define void @test_vsoxseg6_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8391,11 +8277,11 @@ define void @test_vsoxseg6_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8411,11 +8297,11 @@ define void @test_vsoxseg6_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8428,11 +8314,11 @@ define void @test_vsoxseg6_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8448,12 +8334,12 @@ define void @test_vsoxseg7_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8466,12 +8352,12 @@ define void @test_vsoxseg7_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8487,12 +8373,12 @@ define void @test_vsoxseg7_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8505,12 +8391,12 @@ define void @test_vsoxseg7_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8526,12 +8412,12 @@ define void @test_vsoxseg7_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8544,12 +8430,12 @@ define void @test_vsoxseg7_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8565,13 +8451,13 @@ define void @test_vsoxseg8_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8584,13 +8470,13 @@ define void @test_vsoxseg8_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8606,13 +8492,13 @@ define void @test_vsoxseg8_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8625,13 +8511,13 @@ define void @test_vsoxseg8_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8647,13 +8533,13 @@ define void @test_vsoxseg8_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8666,13 +8552,13 @@ define void @test_vsoxseg8_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8687,7 +8573,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vsc
 define void @test_vsoxseg2_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -8701,7 +8586,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -8718,7 +8602,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vsca
 define void @test_vsoxseg2_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -8732,7 +8615,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -8749,7 +8631,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vsc
 define void @test_vsoxseg2_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -8763,7 +8644,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -8781,8 +8661,8 @@ define void @test_vsoxseg3_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8795,8 +8675,8 @@ define void @test_vsoxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8812,8 +8692,8 @@ define void @test_vsoxseg3_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8826,8 +8706,8 @@ define void @test_vsoxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8843,8 +8723,8 @@ define void @test_vsoxseg3_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8857,8 +8737,8 @@ define void @test_vsoxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8874,9 +8754,9 @@ define void @test_vsoxseg4_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8889,9 +8769,9 @@ define void @test_vsoxseg4_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8907,9 +8787,9 @@ define void @test_vsoxseg4_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8922,9 +8802,9 @@ define void @test_vsoxseg4_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8940,9 +8820,9 @@ define void @test_vsoxseg4_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8955,9 +8835,9 @@ define void @test_vsoxseg4_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8973,10 +8853,10 @@ define void @test_vsoxseg5_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8989,10 +8869,10 @@ define void @test_vsoxseg5_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9008,10 +8888,10 @@ define void @test_vsoxseg5_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9024,10 +8904,10 @@ define void @test_vsoxseg5_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9043,10 +8923,10 @@ define void @test_vsoxseg5_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9059,10 +8939,10 @@ define void @test_vsoxseg5_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9078,11 +8958,11 @@ define void @test_vsoxseg6_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9095,11 +8975,11 @@ define void @test_vsoxseg6_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9115,11 +8995,11 @@ define void @test_vsoxseg6_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9132,11 +9012,11 @@ define void @test_vsoxseg6_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9152,11 +9032,11 @@ define void @test_vsoxseg6_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9169,11 +9049,11 @@ define void @test_vsoxseg6_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9189,12 +9069,12 @@ define void @test_vsoxseg7_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9207,12 +9087,12 @@ define void @test_vsoxseg7_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9228,12 +9108,12 @@ define void @test_vsoxseg7_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9246,12 +9126,12 @@ define void @test_vsoxseg7_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9267,12 +9147,12 @@ define void @test_vsoxseg7_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9285,12 +9165,12 @@ define void @test_vsoxseg7_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9306,13 +9186,13 @@ define void @test_vsoxseg8_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9325,13 +9205,13 @@ define void @test_vsoxseg8_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9347,13 +9227,13 @@ define void @test_vsoxseg8_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9366,13 +9246,13 @@ define void @test_vsoxseg8_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9388,13 +9268,13 @@ define void @test_vsoxseg8_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9407,13 +9287,13 @@ define void @test_vsoxseg8_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9428,7 +9308,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscal
 define void @test_vsoxseg2_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -9442,7 +9321,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -9459,7 +9337,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vsca
 define void @test_vsoxseg2_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -9473,7 +9350,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -9490,7 +9366,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vsca
 define void @test_vsoxseg2_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -9504,7 +9379,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -9522,8 +9396,8 @@ define void @test_vsoxseg3_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9536,8 +9410,8 @@ define void @test_vsoxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9553,8 +9427,8 @@ define void @test_vsoxseg3_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9567,8 +9441,8 @@ define void @test_vsoxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9584,8 +9458,8 @@ define void @test_vsoxseg3_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9598,8 +9472,8 @@ define void @test_vsoxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9615,9 +9489,9 @@ define void @test_vsoxseg4_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9630,9 +9504,9 @@ define void @test_vsoxseg4_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9648,9 +9522,9 @@ define void @test_vsoxseg4_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9663,9 +9537,9 @@ define void @test_vsoxseg4_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9681,9 +9555,9 @@ define void @test_vsoxseg4_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9696,9 +9570,9 @@ define void @test_vsoxseg4_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9714,10 +9588,10 @@ define void @test_vsoxseg5_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9730,10 +9604,10 @@ define void @test_vsoxseg5_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9749,10 +9623,10 @@ define void @test_vsoxseg5_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9765,10 +9639,10 @@ define void @test_vsoxseg5_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9784,10 +9658,10 @@ define void @test_vsoxseg5_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9800,10 +9674,10 @@ define void @test_vsoxseg5_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9819,11 +9693,11 @@ define void @test_vsoxseg6_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9836,11 +9710,11 @@ define void @test_vsoxseg6_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9856,11 +9730,11 @@ define void @test_vsoxseg6_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9873,11 +9747,11 @@ define void @test_vsoxseg6_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9893,11 +9767,11 @@ define void @test_vsoxseg6_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9910,11 +9784,11 @@ define void @test_vsoxseg6_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9930,12 +9804,12 @@ define void @test_vsoxseg7_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9948,12 +9822,12 @@ define void @test_vsoxseg7_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9969,12 +9843,12 @@ define void @test_vsoxseg7_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9987,12 +9861,12 @@ define void @test_vsoxseg7_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10008,12 +9882,12 @@ define void @test_vsoxseg7_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10026,12 +9900,12 @@ define void @test_vsoxseg7_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10047,13 +9921,13 @@ define void @test_vsoxseg8_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10066,13 +9940,13 @@ define void @test_vsoxseg8_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10088,13 +9962,13 @@ define void @test_vsoxseg8_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10107,13 +9981,13 @@ define void @test_vsoxseg8_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10129,13 +10003,13 @@ define void @test_vsoxseg8_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10148,13 +10022,13 @@ define void @test_vsoxseg8_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10169,7 +10043,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vsca
 define void @test_vsoxseg2_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -10183,7 +10056,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -10200,7 +10072,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vsc
 define void @test_vsoxseg2_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -10214,7 +10085,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -10231,7 +10101,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vsc
 define void @test_vsoxseg2_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -10245,7 +10114,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -10263,8 +10131,8 @@ define void @test_vsoxseg3_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10277,8 +10145,8 @@ define void @test_vsoxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10294,8 +10162,8 @@ define void @test_vsoxseg3_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10308,8 +10176,8 @@ define void @test_vsoxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10325,8 +10193,8 @@ define void @test_vsoxseg3_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10339,8 +10207,8 @@ define void @test_vsoxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10356,9 +10224,9 @@ define void @test_vsoxseg4_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10371,9 +10239,9 @@ define void @test_vsoxseg4_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10389,9 +10257,9 @@ define void @test_vsoxseg4_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10404,9 +10272,9 @@ define void @test_vsoxseg4_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10422,9 +10290,9 @@ define void @test_vsoxseg4_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10437,9 +10305,9 @@ define void @test_vsoxseg4_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10455,10 +10323,10 @@ define void @test_vsoxseg5_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10471,10 +10339,10 @@ define void @test_vsoxseg5_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10490,10 +10358,10 @@ define void @test_vsoxseg5_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10506,10 +10374,10 @@ define void @test_vsoxseg5_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10525,10 +10393,10 @@ define void @test_vsoxseg5_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10541,10 +10409,10 @@ define void @test_vsoxseg5_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10560,11 +10428,11 @@ define void @test_vsoxseg6_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10577,11 +10445,11 @@ define void @test_vsoxseg6_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10597,11 +10465,11 @@ define void @test_vsoxseg6_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10614,11 +10482,11 @@ define void @test_vsoxseg6_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10634,11 +10502,11 @@ define void @test_vsoxseg6_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10651,11 +10519,11 @@ define void @test_vsoxseg6_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10671,12 +10539,12 @@ define void @test_vsoxseg7_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10689,12 +10557,12 @@ define void @test_vsoxseg7_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10710,12 +10578,12 @@ define void @test_vsoxseg7_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10728,12 +10596,12 @@ define void @test_vsoxseg7_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10749,12 +10617,12 @@ define void @test_vsoxseg7_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10767,12 +10635,12 @@ define void @test_vsoxseg7_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10788,13 +10656,13 @@ define void @test_vsoxseg8_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10807,13 +10675,13 @@ define void @test_vsoxseg8_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10829,13 +10697,13 @@ define void @test_vsoxseg8_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10848,13 +10716,13 @@ define void @test_vsoxseg8_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10870,13 +10738,13 @@ define void @test_vsoxseg8_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10889,13 +10757,13 @@ define void @test_vsoxseg8_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10910,7 +10778,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vsca
 define void @test_vsoxseg2_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -10924,7 +10791,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -10941,7 +10807,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscal
 define void @test_vsoxseg2_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -10955,7 +10820,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -10972,7 +10836,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vsca
 define void @test_vsoxseg2_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12
@@ -10985,7 +10848,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12, v0.t
@@ -11002,8 +10864,8 @@ define void @test_vsoxseg3_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11016,8 +10878,8 @@ define void @test_vsoxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11033,8 +10895,8 @@ define void @test_vsoxseg3_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11047,8 +10909,8 @@ define void @test_vsoxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11063,11 +10925,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vsca
 define void @test_vsoxseg3_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
@@ -11077,11 +10939,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
@@ -11095,9 +10957,9 @@ define void @test_vsoxseg4_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11110,9 +10972,9 @@ define void @test_vsoxseg4_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11128,9 +10990,9 @@ define void @test_vsoxseg4_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11143,9 +11005,9 @@ define void @test_vsoxseg4_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11161,9 +11023,9 @@ define void @test_vsoxseg4_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -11176,9 +11038,9 @@ define void @test_vsoxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -11193,7 +11055,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float>,<vsc
 define void @test_vsoxseg2_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -11207,7 +11068,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -11224,7 +11084,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float>,<vsca
 define void @test_vsoxseg2_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -11238,7 +11097,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -11255,7 +11113,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float>,<vsc
 define void @test_vsoxseg2_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -11269,7 +11126,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -11286,7 +11142,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vs
 define void @test_vsoxseg2_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11300,7 +11155,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11317,7 +11171,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vsc
 define void @test_vsoxseg2_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11331,7 +11184,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11348,7 +11200,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vs
 define void @test_vsoxseg2_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11362,7 +11213,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11380,8 +11230,8 @@ define void @test_vsoxseg3_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11394,8 +11244,8 @@ define void @test_vsoxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11411,8 +11261,8 @@ define void @test_vsoxseg3_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11425,8 +11275,8 @@ define void @test_vsoxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11442,8 +11292,8 @@ define void @test_vsoxseg3_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11456,8 +11306,8 @@ define void @test_vsoxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11473,9 +11323,9 @@ define void @test_vsoxseg4_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11488,9 +11338,9 @@ define void @test_vsoxseg4_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11506,9 +11356,9 @@ define void @test_vsoxseg4_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11521,9 +11371,9 @@ define void @test_vsoxseg4_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11539,9 +11389,9 @@ define void @test_vsoxseg4_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11554,9 +11404,9 @@ define void @test_vsoxseg4_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11571,7 +11421,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vsca
 define void @test_vsoxseg2_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -11585,7 +11434,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -11602,7 +11450,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscal
 define void @test_vsoxseg2_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -11616,7 +11463,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -11633,7 +11479,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vsca
 define void @test_vsoxseg2_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
@@ -11646,7 +11491,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
@@ -11663,8 +11507,8 @@ define void @test_vsoxseg3_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11677,8 +11521,8 @@ define void @test_vsoxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11694,8 +11538,8 @@ define void @test_vsoxseg3_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11708,8 +11552,8 @@ define void @test_vsoxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11724,11 +11568,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vsca
 define void @test_vsoxseg3_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
@@ -11738,11 +11582,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
@@ -11756,9 +11600,9 @@ define void @test_vsoxseg4_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11771,9 +11615,9 @@ define void @test_vsoxseg4_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11789,9 +11633,9 @@ define void @test_vsoxseg4_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11804,9 +11648,9 @@ define void @test_vsoxseg4_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11822,9 +11666,9 @@ define void @test_vsoxseg4_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11837,9 +11681,9 @@ define void @test_vsoxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11855,10 +11699,10 @@ define void @test_vsoxseg5_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11871,10 +11715,10 @@ define void @test_vsoxseg5_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11890,10 +11734,10 @@ define void @test_vsoxseg5_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11906,10 +11750,10 @@ define void @test_vsoxseg5_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11925,10 +11769,10 @@ define void @test_vsoxseg5_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11941,10 +11785,10 @@ define void @test_vsoxseg5_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11960,11 +11804,11 @@ define void @test_vsoxseg6_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11977,11 +11821,11 @@ define void @test_vsoxseg6_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11997,11 +11841,11 @@ define void @test_vsoxseg6_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12014,11 +11858,11 @@ define void @test_vsoxseg6_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12034,11 +11878,11 @@ define void @test_vsoxseg6_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -12051,11 +11895,11 @@ define void @test_vsoxseg6_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -12071,12 +11915,12 @@ define void @test_vsoxseg7_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12089,12 +11933,12 @@ define void @test_vsoxseg7_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12110,12 +11954,12 @@ define void @test_vsoxseg7_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12128,12 +11972,12 @@ define void @test_vsoxseg7_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12149,12 +11993,12 @@ define void @test_vsoxseg7_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -12167,12 +12011,12 @@ define void @test_vsoxseg7_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -12188,13 +12032,13 @@ define void @test_vsoxseg8_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12207,13 +12051,13 @@ define void @test_vsoxseg8_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12229,13 +12073,13 @@ define void @test_vsoxseg8_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12248,13 +12092,13 @@ define void @test_vsoxseg8_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12270,13 +12114,13 @@ define void @test_vsoxseg8_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -12289,13 +12133,13 @@ define void @test_vsoxseg8_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -12310,7 +12154,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vsca
 define void @test_vsoxseg2_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -12324,7 +12167,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -12341,7 +12183,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscal
 define void @test_vsoxseg2_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -12355,7 +12196,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -12372,7 +12212,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vsca
 define void @test_vsoxseg2_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -12386,7 +12225,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -12404,8 +12242,8 @@ define void @test_vsoxseg3_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12418,8 +12256,8 @@ define void @test_vsoxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12435,8 +12273,8 @@ define void @test_vsoxseg3_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12449,8 +12287,8 @@ define void @test_vsoxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12466,8 +12304,8 @@ define void @test_vsoxseg3_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12480,8 +12318,8 @@ define void @test_vsoxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12497,9 +12335,9 @@ define void @test_vsoxseg4_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12512,9 +12350,9 @@ define void @test_vsoxseg4_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12530,9 +12368,9 @@ define void @test_vsoxseg4_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12545,9 +12383,9 @@ define void @test_vsoxseg4_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12563,9 +12401,9 @@ define void @test_vsoxseg4_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12578,9 +12416,9 @@ define void @test_vsoxseg4_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12596,10 +12434,10 @@ define void @test_vsoxseg5_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12612,10 +12450,10 @@ define void @test_vsoxseg5_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12631,10 +12469,10 @@ define void @test_vsoxseg5_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12647,10 +12485,10 @@ define void @test_vsoxseg5_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12666,10 +12504,10 @@ define void @test_vsoxseg5_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12682,10 +12520,10 @@ define void @test_vsoxseg5_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12701,11 +12539,11 @@ define void @test_vsoxseg6_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12718,11 +12556,11 @@ define void @test_vsoxseg6_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12738,11 +12576,11 @@ define void @test_vsoxseg6_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12755,11 +12593,11 @@ define void @test_vsoxseg6_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12775,11 +12613,11 @@ define void @test_vsoxseg6_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12792,11 +12630,11 @@ define void @test_vsoxseg6_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12812,12 +12650,12 @@ define void @test_vsoxseg7_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12830,12 +12668,12 @@ define void @test_vsoxseg7_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12851,12 +12689,12 @@ define void @test_vsoxseg7_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12869,12 +12707,12 @@ define void @test_vsoxseg7_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12890,12 +12728,12 @@ define void @test_vsoxseg7_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12908,12 +12746,12 @@ define void @test_vsoxseg7_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12929,13 +12767,13 @@ define void @test_vsoxseg8_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12948,13 +12786,13 @@ define void @test_vsoxseg8_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12970,13 +12808,13 @@ define void @test_vsoxseg8_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12989,13 +12827,13 @@ define void @test_vsoxseg8_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13011,13 +12849,13 @@ define void @test_vsoxseg8_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13030,13 +12868,13 @@ define void @test_vsoxseg8_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13051,7 +12889,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vsc
 define void @test_vsoxseg2_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -13065,7 +12902,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -13082,7 +12918,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vsca
 define void @test_vsoxseg2_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -13096,7 +12931,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -13113,7 +12947,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vsc
 define void @test_vsoxseg2_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -13127,7 +12960,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -13145,8 +12977,8 @@ define void @test_vsoxseg3_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13159,8 +12991,8 @@ define void @test_vsoxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13176,8 +13008,8 @@ define void @test_vsoxseg3_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13190,8 +13022,8 @@ define void @test_vsoxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13207,8 +13039,8 @@ define void @test_vsoxseg3_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13221,8 +13053,8 @@ define void @test_vsoxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13238,9 +13070,9 @@ define void @test_vsoxseg4_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13253,9 +13085,9 @@ define void @test_vsoxseg4_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13271,9 +13103,9 @@ define void @test_vsoxseg4_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13286,9 +13118,9 @@ define void @test_vsoxseg4_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13304,9 +13136,9 @@ define void @test_vsoxseg4_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13319,9 +13151,9 @@ define void @test_vsoxseg4_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll
index 7f84323b0e022..dacf1f99f242d 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxseg-rv64.ll
@@ -8,7 +8,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vs
 define void @test_vsoxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -22,7 +21,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -39,7 +37,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vsc
 define void @test_vsoxseg2_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -53,7 +50,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -70,7 +66,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vs
 define void @test_vsoxseg2_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16
@@ -83,7 +78,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16, v0.t
@@ -99,7 +93,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscal
 define void @test_vsoxseg2_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -113,7 +106,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -130,7 +122,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale
 define void @test_vsoxseg2_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -144,7 +135,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -161,7 +151,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscal
 define void @test_vsoxseg2_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v12
@@ -174,7 +163,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v12, v0.t
@@ -190,7 +178,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscal
 define void @test_vsoxseg2_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -204,7 +191,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -222,8 +208,8 @@ define void @test_vsoxseg3_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -236,8 +222,8 @@ define void @test_vsoxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -253,8 +239,8 @@ define void @test_vsoxseg3_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -267,8 +253,8 @@ define void @test_vsoxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -283,11 +269,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscal
 define void @test_vsoxseg3_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v16, (a0), v12
+; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
@@ -297,11 +283,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -315,8 +301,8 @@ define void @test_vsoxseg3_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -329,8 +315,8 @@ define void @test_vsoxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -346,9 +332,9 @@ define void @test_vsoxseg4_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -361,9 +347,9 @@ define void @test_vsoxseg4_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -379,9 +365,9 @@ define void @test_vsoxseg4_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -394,9 +380,9 @@ define void @test_vsoxseg4_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -412,9 +398,9 @@ define void @test_vsoxseg4_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -427,9 +413,9 @@ define void @test_vsoxseg4_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -445,9 +431,9 @@ define void @test_vsoxseg4_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -460,9 +446,9 @@ define void @test_vsoxseg4_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -477,7 +463,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vsca
 define void @test_vsoxseg2_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v12
@@ -490,7 +475,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v12, v0.t
@@ -506,7 +490,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscal
 define void @test_vsoxseg2_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -520,7 +503,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -537,7 +519,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vsca
 define void @test_vsoxseg2_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16
@@ -550,7 +531,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16, v0.t
@@ -566,11 +546,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vsca
 define void @test_vsoxseg3_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei16.v v16, (a0), v12
+; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl)
@@ -580,11 +560,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei16.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
@@ -598,8 +578,8 @@ define void @test_vsoxseg3_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -612,8 +592,8 @@ define void @test_vsoxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -628,7 +608,6 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vsca
 define void @test_vsoxseg3_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -642,7 +621,6 @@ entry:
 define void @test_vsoxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -660,9 +638,9 @@ define void @test_vsoxseg4_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -675,9 +653,9 @@ define void @test_vsoxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -693,9 +671,9 @@ define void @test_vsoxseg4_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -708,9 +686,9 @@ define void @test_vsoxseg4_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -725,7 +703,6 @@ declare void @llvm.riscv.vsoxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vsca
 define void @test_vsoxseg4_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -740,7 +717,6 @@ entry:
 define void @test_vsoxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -758,7 +734,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscal
 define void @test_vsoxseg2_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -772,7 +747,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -789,7 +763,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscal
 define void @test_vsoxseg2_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -803,7 +776,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -820,7 +792,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscal
 define void @test_vsoxseg2_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -834,7 +805,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -851,7 +821,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale
 define void @test_vsoxseg2_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -865,7 +834,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -883,8 +851,8 @@ define void @test_vsoxseg3_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -897,8 +865,8 @@ define void @test_vsoxseg3_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -914,8 +882,8 @@ define void @test_vsoxseg3_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -928,8 +896,8 @@ define void @test_vsoxseg3_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -945,8 +913,8 @@ define void @test_vsoxseg3_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -959,8 +927,8 @@ define void @test_vsoxseg3_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -976,8 +944,8 @@ define void @test_vsoxseg3_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -990,8 +958,8 @@ define void @test_vsoxseg3_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1007,9 +975,9 @@ define void @test_vsoxseg4_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1022,9 +990,9 @@ define void @test_vsoxseg4_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1040,9 +1008,9 @@ define void @test_vsoxseg4_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1055,9 +1023,9 @@ define void @test_vsoxseg4_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1073,9 +1041,9 @@ define void @test_vsoxseg4_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1088,9 +1056,9 @@ define void @test_vsoxseg4_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1106,9 +1074,9 @@ define void @test_vsoxseg4_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1121,9 +1089,9 @@ define void @test_vsoxseg4_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1139,10 +1107,10 @@ define void @test_vsoxseg5_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1155,10 +1123,10 @@ define void @test_vsoxseg5_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1174,10 +1142,10 @@ define void @test_vsoxseg5_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1190,10 +1158,10 @@ define void @test_vsoxseg5_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1209,10 +1177,10 @@ define void @test_vsoxseg5_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1225,10 +1193,10 @@ define void @test_vsoxseg5_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1244,10 +1212,10 @@ define void @test_vsoxseg5_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg5_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1260,10 +1228,10 @@ define void @test_vsoxseg5_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1279,11 +1247,11 @@ define void @test_vsoxseg6_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1296,11 +1264,11 @@ define void @test_vsoxseg6_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1316,11 +1284,11 @@ define void @test_vsoxseg6_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1333,11 +1301,11 @@ define void @test_vsoxseg6_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1353,11 +1321,11 @@ define void @test_vsoxseg6_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1370,11 +1338,11 @@ define void @test_vsoxseg6_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1390,11 +1358,11 @@ define void @test_vsoxseg6_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg6_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1407,11 +1375,11 @@ define void @test_vsoxseg6_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1427,12 +1395,12 @@ define void @test_vsoxseg7_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1445,12 +1413,12 @@ define void @test_vsoxseg7_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1466,12 +1434,12 @@ define void @test_vsoxseg7_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1484,12 +1452,12 @@ define void @test_vsoxseg7_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1505,12 +1473,12 @@ define void @test_vsoxseg7_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1523,12 +1491,12 @@ define void @test_vsoxseg7_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1544,15 +1512,15 @@ define void @test_vsoxseg7_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg7_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
-; CHECK-NEXT:    ret
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
+; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg7.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
   ret void
@@ -1562,12 +1530,12 @@ define void @test_vsoxseg7_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1583,13 +1551,13 @@ define void @test_vsoxseg8_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1602,13 +1570,13 @@ define void @test_vsoxseg8_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1624,13 +1592,13 @@ define void @test_vsoxseg8_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1643,13 +1611,13 @@ define void @test_vsoxseg8_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1665,13 +1633,13 @@ define void @test_vsoxseg8_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1684,13 +1652,13 @@ define void @test_vsoxseg8_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1706,13 +1674,13 @@ define void @test_vsoxseg8_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg8_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1725,13 +1693,13 @@ define void @test_vsoxseg8_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1746,7 +1714,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscal
 define void @test_vsoxseg2_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1760,7 +1727,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1777,7 +1743,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscal
 define void @test_vsoxseg2_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1791,7 +1756,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1808,7 +1772,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscal
 define void @test_vsoxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1822,7 +1785,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1839,7 +1801,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale
 define void @test_vsoxseg2_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1853,7 +1814,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1871,8 +1831,8 @@ define void @test_vsoxseg3_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1885,8 +1845,8 @@ define void @test_vsoxseg3_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1902,8 +1862,8 @@ define void @test_vsoxseg3_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1916,8 +1876,8 @@ define void @test_vsoxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1933,8 +1893,8 @@ define void @test_vsoxseg3_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1947,8 +1907,8 @@ define void @test_vsoxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1964,8 +1924,8 @@ define void @test_vsoxseg3_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1978,8 +1938,8 @@ define void @test_vsoxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1995,9 +1955,9 @@ define void @test_vsoxseg4_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2010,9 +1970,9 @@ define void @test_vsoxseg4_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2028,9 +1988,9 @@ define void @test_vsoxseg4_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2043,9 +2003,9 @@ define void @test_vsoxseg4_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2061,9 +2021,9 @@ define void @test_vsoxseg4_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2076,9 +2036,9 @@ define void @test_vsoxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2094,9 +2054,9 @@ define void @test_vsoxseg4_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2109,9 +2069,9 @@ define void @test_vsoxseg4_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2127,10 +2087,10 @@ define void @test_vsoxseg5_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2143,10 +2103,10 @@ define void @test_vsoxseg5_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2162,10 +2122,10 @@ define void @test_vsoxseg5_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2178,10 +2138,10 @@ define void @test_vsoxseg5_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2197,10 +2157,10 @@ define void @test_vsoxseg5_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2213,10 +2173,10 @@ define void @test_vsoxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2232,10 +2192,10 @@ define void @test_vsoxseg5_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg5_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2248,10 +2208,10 @@ define void @test_vsoxseg5_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2267,11 +2227,11 @@ define void @test_vsoxseg6_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2284,11 +2244,11 @@ define void @test_vsoxseg6_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2304,11 +2264,11 @@ define void @test_vsoxseg6_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2321,11 +2281,11 @@ define void @test_vsoxseg6_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2341,11 +2301,11 @@ define void @test_vsoxseg6_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2358,11 +2318,11 @@ define void @test_vsoxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2378,11 +2338,11 @@ define void @test_vsoxseg6_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg6_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2395,11 +2355,11 @@ define void @test_vsoxseg6_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2415,12 +2375,12 @@ define void @test_vsoxseg7_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2433,12 +2393,12 @@ define void @test_vsoxseg7_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2454,12 +2414,12 @@ define void @test_vsoxseg7_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2472,12 +2432,12 @@ define void @test_vsoxseg7_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2493,12 +2453,12 @@ define void @test_vsoxseg7_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2511,16 +2471,16 @@ define void @test_vsoxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
   tail call void @llvm.riscv.vsoxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
   ret void
 }
@@ -2532,12 +2492,12 @@ define void @test_vsoxseg7_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg7_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2550,12 +2510,12 @@ define void @test_vsoxseg7_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2571,13 +2531,13 @@ define void @test_vsoxseg8_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2590,13 +2550,13 @@ define void @test_vsoxseg8_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2612,13 +2572,13 @@ define void @test_vsoxseg8_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2631,13 +2591,13 @@ define void @test_vsoxseg8_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2653,13 +2613,13 @@ define void @test_vsoxseg8_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2672,13 +2632,13 @@ define void @test_vsoxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2694,13 +2654,13 @@ define void @test_vsoxseg8_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg8_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2713,13 +2673,13 @@ define void @test_vsoxseg8_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2734,7 +2694,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscal
 define void @test_vsoxseg2_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2748,7 +2707,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2765,7 +2723,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale
 define void @test_vsoxseg2_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2779,7 +2736,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2796,7 +2752,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscal
 define void @test_vsoxseg2_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v16
@@ -2809,7 +2764,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v16, v0.t
@@ -2825,7 +2779,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscal
 define void @test_vsoxseg2_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12
@@ -2838,7 +2791,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12, v0.t
@@ -2855,8 +2807,8 @@ define void @test_vsoxseg3_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2869,8 +2821,8 @@ define void @test_vsoxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -2886,8 +2838,8 @@ define void @test_vsoxseg3_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2900,8 +2852,8 @@ define void @test_vsoxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -2916,7 +2868,6 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscal
 define void @test_vsoxseg3_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2930,7 +2881,6 @@ entry:
 define void @test_vsoxseg3_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2947,11 +2897,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscal
 define void @test_vsoxseg3_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
@@ -2961,11 +2911,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
@@ -2979,9 +2929,9 @@ define void @test_vsoxseg4_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2994,9 +2944,9 @@ define void @test_vsoxseg4_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3012,9 +2962,9 @@ define void @test_vsoxseg4_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3027,9 +2977,9 @@ define void @test_vsoxseg4_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3044,7 +2994,6 @@ declare void @llvm.riscv.vsoxseg4.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscal
 define void @test_vsoxseg4_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -3059,7 +3008,6 @@ entry:
 define void @test_vsoxseg4_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -3078,9 +3026,9 @@ define void @test_vsoxseg4_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -3093,9 +3041,9 @@ define void @test_vsoxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -3110,7 +3058,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale
 define void @test_vsoxseg2_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
@@ -3123,7 +3070,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
@@ -3139,7 +3085,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x
 define void @test_vsoxseg2_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3153,7 +3098,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3170,7 +3114,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale
 define void @test_vsoxseg2_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v12
@@ -3183,7 +3126,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v12, v0.t
@@ -3199,7 +3141,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale
 define void @test_vsoxseg2_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3213,7 +3154,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3230,11 +3170,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale
 define void @test_vsoxseg3_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
@@ -3244,11 +3184,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -3262,8 +3202,8 @@ define void @test_vsoxseg3_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3276,8 +3216,8 @@ define void @test_vsoxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3292,7 +3232,6 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale
 define void @test_vsoxseg3_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3306,7 +3245,6 @@ entry:
 define void @test_vsoxseg3_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3324,8 +3262,8 @@ define void @test_vsoxseg3_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg3_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3338,8 +3276,8 @@ define void @test_vsoxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3355,9 +3293,9 @@ define void @test_vsoxseg4_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3370,9 +3308,9 @@ define void @test_vsoxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3388,9 +3326,9 @@ define void @test_vsoxseg4_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3403,9 +3341,9 @@ define void @test_vsoxseg4_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3420,7 +3358,6 @@ declare void @llvm.riscv.vsoxseg4.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale
 define void @test_vsoxseg4_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3435,7 +3372,6 @@ entry:
 define void @test_vsoxseg4_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3454,9 +3390,9 @@ define void @test_vsoxseg4_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3469,9 +3405,9 @@ define void @test_vsoxseg4_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3487,10 +3423,10 @@ define void @test_vsoxseg5_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3503,10 +3439,10 @@ define void @test_vsoxseg5_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3522,10 +3458,10 @@ define void @test_vsoxseg5_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3538,10 +3474,10 @@ define void @test_vsoxseg5_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3556,13 +3492,13 @@ declare void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale
 define void @test_vsoxseg5_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT:    vsoxseg5ei64.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
@@ -3572,13 +3508,13 @@ entry:
 define void @test_vsoxseg5_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsoxseg5ei64.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -3592,10 +3528,10 @@ define void @test_vsoxseg5_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3608,10 +3544,10 @@ define void @test_vsoxseg5_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3627,11 +3563,11 @@ define void @test_vsoxseg6_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3644,11 +3580,11 @@ define void @test_vsoxseg6_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3664,11 +3600,11 @@ define void @test_vsoxseg6_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3681,11 +3617,11 @@ define void @test_vsoxseg6_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3701,11 +3637,11 @@ define void @test_vsoxseg6_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -3718,11 +3654,11 @@ define void @test_vsoxseg6_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -3738,11 +3674,11 @@ define void @test_vsoxseg6_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3755,11 +3691,11 @@ define void @test_vsoxseg6_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3775,12 +3711,12 @@ define void @test_vsoxseg7_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3793,12 +3729,12 @@ define void @test_vsoxseg7_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3814,12 +3750,12 @@ define void @test_vsoxseg7_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3832,12 +3768,12 @@ define void @test_vsoxseg7_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3853,12 +3789,12 @@ define void @test_vsoxseg7_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -3871,12 +3807,12 @@ define void @test_vsoxseg7_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -3892,12 +3828,12 @@ define void @test_vsoxseg7_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3910,12 +3846,12 @@ define void @test_vsoxseg7_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3931,13 +3867,13 @@ define void @test_vsoxseg8_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3950,13 +3886,13 @@ define void @test_vsoxseg8_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3972,13 +3908,13 @@ define void @test_vsoxseg8_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3991,13 +3927,13 @@ define void @test_vsoxseg8_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4013,13 +3949,13 @@ define void @test_vsoxseg8_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -4032,13 +3968,13 @@ define void @test_vsoxseg8_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -4054,13 +3990,13 @@ define void @test_vsoxseg8_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4073,13 +4009,13 @@ define void @test_vsoxseg8_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4094,7 +4030,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscal
 define void @test_vsoxseg2_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4108,7 +4043,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4125,7 +4059,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscal
 define void @test_vsoxseg2_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4139,7 +4072,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4156,7 +4088,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscal
 define void @test_vsoxseg2_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4170,7 +4101,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4187,7 +4117,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale
 define void @test_vsoxseg2_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4201,7 +4130,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4219,8 +4147,8 @@ define void @test_vsoxseg3_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4233,8 +4161,8 @@ define void @test_vsoxseg3_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4250,8 +4178,8 @@ define void @test_vsoxseg3_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4264,8 +4192,8 @@ define void @test_vsoxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4281,8 +4209,8 @@ define void @test_vsoxseg3_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4295,8 +4223,8 @@ define void @test_vsoxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4312,8 +4240,8 @@ define void @test_vsoxseg3_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4326,8 +4254,8 @@ define void @test_vsoxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4343,9 +4271,9 @@ define void @test_vsoxseg4_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4358,9 +4286,9 @@ define void @test_vsoxseg4_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4376,9 +4304,9 @@ define void @test_vsoxseg4_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4391,9 +4319,9 @@ define void @test_vsoxseg4_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4409,9 +4337,9 @@ define void @test_vsoxseg4_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4424,9 +4352,9 @@ define void @test_vsoxseg4_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4442,9 +4370,9 @@ define void @test_vsoxseg4_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4457,9 +4385,9 @@ define void @test_vsoxseg4_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4475,10 +4403,10 @@ define void @test_vsoxseg5_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4491,10 +4419,10 @@ define void @test_vsoxseg5_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4510,10 +4438,10 @@ define void @test_vsoxseg5_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4526,10 +4454,10 @@ define void @test_vsoxseg5_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4545,10 +4473,10 @@ define void @test_vsoxseg5_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4561,10 +4489,10 @@ define void @test_vsoxseg5_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4580,10 +4508,10 @@ define void @test_vsoxseg5_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg5_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4596,10 +4524,10 @@ define void @test_vsoxseg5_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4615,11 +4543,11 @@ define void @test_vsoxseg6_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4632,11 +4560,11 @@ define void @test_vsoxseg6_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4652,11 +4580,11 @@ define void @test_vsoxseg6_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4669,11 +4597,11 @@ define void @test_vsoxseg6_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4689,11 +4617,11 @@ define void @test_vsoxseg6_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4706,11 +4634,11 @@ define void @test_vsoxseg6_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4726,11 +4654,11 @@ define void @test_vsoxseg6_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg6_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4743,11 +4671,11 @@ define void @test_vsoxseg6_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4763,12 +4691,12 @@ define void @test_vsoxseg7_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4781,12 +4709,12 @@ define void @test_vsoxseg7_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4802,12 +4730,12 @@ define void @test_vsoxseg7_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4820,12 +4748,12 @@ define void @test_vsoxseg7_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4841,12 +4769,12 @@ define void @test_vsoxseg7_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4859,12 +4787,12 @@ define void @test_vsoxseg7_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4880,12 +4808,12 @@ define void @test_vsoxseg7_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg7_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4898,16 +4826,16 @@ define void @test_vsoxseg7_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
   tail call void @llvm.riscv.vsoxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
   ret void
 }
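
The pattern repeated through these hunks is worth spelling out once. Before this change, a segment-store tuple such as v10_v14 was treated as a single live unit, so it had to be assembled by a serial chain of copies, each vmv1r.v reading the element written just before it. With subregister liveness enabled, each element of the tuple is live-tracked as its own lane, so every copy can read the original %val in v8 directly; the copies become independent, and the "# kill:" tuple markers removed in the seg2 hunks vanish for the same reason. An annotated sketch of one seg5 case, condensed from the hunks above rather than taken from fresh compiler output:

    # Before: each copy reads the copy before it, a serial chain that
    # keeps the whole v10_v14 tuple live from the first instruction on.
    vmv1r.v v10, v8      # element 0 = %val
    vmv1r.v v11, v10     # depends on the previous copy
    vmv1r.v v12, v10
    vmv1r.v v13, v10
    vmv1r.v v14, v10

    # After: every element copy reads %val directly, and the tuple's
    # lanes are tracked individually by the register allocator.
    vmv1r.v v10, v8
    vmv1r.v v11, v8
    vmv1r.v v12, v8
    vmv1r.v v13, v8
    vmv1r.v v14, v8
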
@@ -4919,13 +4847,13 @@ define void @test_vsoxseg8_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4938,13 +4866,13 @@ define void @test_vsoxseg8_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4960,13 +4888,13 @@ define void @test_vsoxseg8_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4979,13 +4907,13 @@ define void @test_vsoxseg8_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5001,13 +4929,13 @@ define void @test_vsoxseg8_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5020,13 +4948,13 @@ define void @test_vsoxseg8_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5042,13 +4970,13 @@ define void @test_vsoxseg8_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5061,13 +4989,13 @@ define void @test_vsoxseg8_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5082,7 +5010,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscal
 define void @test_vsoxseg2_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -5096,7 +5023,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -5113,7 +5039,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale
 define void @test_vsoxseg2_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -5127,7 +5052,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -5144,7 +5068,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscal
 define void @test_vsoxseg2_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -5158,7 +5081,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -5175,7 +5097,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscal
 define void @test_vsoxseg2_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10
@@ -5188,7 +5109,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10, v0.t
@@ -5205,8 +5125,8 @@ define void @test_vsoxseg3_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5219,8 +5139,8 @@ define void @test_vsoxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5236,8 +5156,8 @@ define void @test_vsoxseg3_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5250,8 +5170,8 @@ define void @test_vsoxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5267,8 +5187,8 @@ define void @test_vsoxseg3_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5281,8 +5201,8 @@ define void @test_vsoxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5297,11 +5217,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscal
 define void @test_vsoxseg3_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -5311,11 +5231,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
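
The two seg3 hunks just above show a different register-allocation decision rather than a shorter copy chain. The EEW=64 index is an m2 value that arrives in v10_v11 and overlaps the v8_v10 tuple, so the old code sidestepped the overlap by copying %val three times to assemble the tuple at v12_v14. With subregister liveness, the allocator instead builds the tuple in place around v8 and relocates the index pair. Annotated, with the same instructions as the hunk and only the comments added:

    vmv1r.v v9, v8       # tuple element 1 = %val (element 0 is already in v8)
    vmv2r.v v12, v10     # move the m2 index pair clear of the v8_v10 tuple
    vmv1r.v v10, v8      # tuple element 2 = %val
    vsetvli zero, a1, e32, m1, ta, ma
    vsoxseg3ei64.v v8, (a0), v12
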
@@ -5329,9 +5249,9 @@ define void @test_vsoxseg4_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5344,9 +5264,9 @@ define void @test_vsoxseg4_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5362,9 +5282,9 @@ define void @test_vsoxseg4_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5377,9 +5297,9 @@ define void @test_vsoxseg4_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5395,9 +5315,9 @@ define void @test_vsoxseg4_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5410,9 +5330,9 @@ define void @test_vsoxseg4_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5428,9 +5348,9 @@ define void @test_vsoxseg4_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -5443,9 +5363,9 @@ define void @test_vsoxseg4_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -5461,10 +5381,10 @@ define void @test_vsoxseg5_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5477,10 +5397,10 @@ define void @test_vsoxseg5_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5496,10 +5416,10 @@ define void @test_vsoxseg5_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5512,10 +5432,10 @@ define void @test_vsoxseg5_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5531,10 +5451,10 @@ define void @test_vsoxseg5_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5547,10 +5467,10 @@ define void @test_vsoxseg5_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5566,10 +5486,10 @@ define void @test_vsoxseg5_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -5582,10 +5502,10 @@ define void @test_vsoxseg5_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -5601,11 +5521,11 @@ define void @test_vsoxseg6_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5618,11 +5538,11 @@ define void @test_vsoxseg6_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5638,11 +5558,11 @@ define void @test_vsoxseg6_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5655,11 +5575,11 @@ define void @test_vsoxseg6_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5675,11 +5595,11 @@ define void @test_vsoxseg6_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5692,11 +5612,11 @@ define void @test_vsoxseg6_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5712,11 +5632,11 @@ define void @test_vsoxseg6_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -5729,11 +5649,11 @@ define void @test_vsoxseg6_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -5749,12 +5669,12 @@ define void @test_vsoxseg7_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5767,12 +5687,12 @@ define void @test_vsoxseg7_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5788,12 +5708,12 @@ define void @test_vsoxseg7_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5806,12 +5726,12 @@ define void @test_vsoxseg7_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5827,12 +5747,12 @@ define void @test_vsoxseg7_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5845,12 +5765,12 @@ define void @test_vsoxseg7_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5866,12 +5786,12 @@ define void @test_vsoxseg7_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -5884,12 +5804,12 @@ define void @test_vsoxseg7_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -5905,13 +5825,13 @@ define void @test_vsoxseg8_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5924,13 +5844,13 @@ define void @test_vsoxseg8_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5946,13 +5866,13 @@ define void @test_vsoxseg8_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5965,13 +5885,13 @@ define void @test_vsoxseg8_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5987,13 +5907,13 @@ define void @test_vsoxseg8_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6006,13 +5926,13 @@ define void @test_vsoxseg8_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6028,13 +5948,13 @@ define void @test_vsoxseg8_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -6047,13 +5967,13 @@ define void @test_vsoxseg8_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -6068,7 +5988,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg2_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10
@@ -6081,7 +6000,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v10, v0.t
@@ -6097,7 +6015,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x
 define void @test_vsoxseg2_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -6111,7 +6028,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -6128,7 +6044,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg2_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v16
@@ -6141,7 +6056,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v16, v0.t
@@ -6157,7 +6071,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg2_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12
@@ -6170,7 +6083,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12, v0.t
@@ -6186,11 +6098,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg3_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10
+; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
@@ -6200,11 +6112,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsoxseg3ei16.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
@@ -6218,8 +6130,8 @@ define void @test_vsoxseg3_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6232,8 +6144,8 @@ define void @test_vsoxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6248,7 +6160,6 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg3_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -6262,7 +6173,6 @@ entry:
 define void @test_vsoxseg3_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -6279,7 +6189,6 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg3_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -6293,7 +6202,6 @@ entry:
 define void @test_vsoxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -6311,9 +6219,9 @@ define void @test_vsoxseg4_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -6326,9 +6234,9 @@ define void @test_vsoxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -6344,9 +6252,9 @@ define void @test_vsoxseg4_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6359,9 +6267,9 @@ define void @test_vsoxseg4_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6376,7 +6284,6 @@ declare void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg4_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6391,7 +6298,6 @@ entry:
 define void @test_vsoxseg4_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6409,7 +6315,6 @@ declare void @llvm.riscv.vsoxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg4_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6424,7 +6329,6 @@ entry:
 define void @test_vsoxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6443,10 +6347,10 @@ define void @test_vsoxseg5_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -6459,10 +6363,10 @@ define void @test_vsoxseg5_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -6478,10 +6382,10 @@ define void @test_vsoxseg5_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6494,10 +6398,10 @@ define void @test_vsoxseg5_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6512,7 +6416,6 @@ declare void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg5_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6528,7 +6431,6 @@ entry:
 define void @test_vsoxseg5_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6547,13 +6449,13 @@ declare void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg5_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsoxseg5ei32.v v16, (a0), v12
+; CHECK-NEXT:    vsoxseg5ei32.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
@@ -6563,13 +6465,13 @@ entry:
 define void @test_vsoxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsoxseg5ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsoxseg5ei32.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
@@ -6583,11 +6485,11 @@ define void @test_vsoxseg6_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -6600,11 +6502,11 @@ define void @test_vsoxseg6_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -6620,11 +6522,11 @@ define void @test_vsoxseg6_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6637,11 +6539,11 @@ define void @test_vsoxseg6_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6656,7 +6558,6 @@ declare void @llvm.riscv.vsoxseg6.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg6_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6673,7 +6574,6 @@ entry:
 define void @test_vsoxseg6_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6694,11 +6594,11 @@ define void @test_vsoxseg6_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -6711,11 +6611,11 @@ define void @test_vsoxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -6731,12 +6631,12 @@ define void @test_vsoxseg7_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -6749,12 +6649,12 @@ define void @test_vsoxseg7_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -6770,12 +6670,12 @@ define void @test_vsoxseg7_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6788,12 +6688,12 @@ define void @test_vsoxseg7_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6808,7 +6708,6 @@ declare void @llvm.riscv.vsoxseg7.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg7_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6826,7 +6725,6 @@ entry:
 define void @test_vsoxseg7_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6848,12 +6746,12 @@ define void @test_vsoxseg7_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -6866,12 +6764,12 @@ define void @test_vsoxseg7_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -6887,13 +6785,13 @@ define void @test_vsoxseg8_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -6906,13 +6804,13 @@ define void @test_vsoxseg8_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -6928,13 +6826,13 @@ define void @test_vsoxseg8_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6947,13 +6845,13 @@ define void @test_vsoxseg8_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6968,7 +6866,6 @@ declare void @llvm.riscv.vsoxseg8.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale
 define void @test_vsoxseg8_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6987,7 +6884,6 @@ entry:
 define void @test_vsoxseg8_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -7010,13 +6906,13 @@ define void @test_vsoxseg8_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -7029,13 +6925,13 @@ define void @test_vsoxseg8_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -7050,7 +6946,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i32(<vscale x 4 x i64>,<vscal
 define void @test_vsoxseg2_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7064,7 +6959,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7081,7 +6975,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i8(<vscale x 4 x i64>,<vscale
 define void @test_vsoxseg2_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7095,7 +6988,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7112,7 +7004,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i64(<vscale x 4 x i64>,<vscal
 define void @test_vsoxseg2_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7126,7 +7017,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7143,7 +7033,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i64.nxv4i16(<vscale x 4 x i64>,<vscal
 define void @test_vsoxseg2_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7157,7 +7046,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7174,7 +7062,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscal
 define void @test_vsoxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
@@ -7187,7 +7074,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
@@ -7203,7 +7089,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale
 define void @test_vsoxseg2_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -7217,7 +7102,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -7234,7 +7118,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscal
 define void @test_vsoxseg2_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v12
@@ -7247,7 +7130,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v12, v0.t
@@ -7263,7 +7145,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscal
 define void @test_vsoxseg2_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -7277,7 +7158,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -7294,11 +7174,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscal
 define void @test_vsoxseg3_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
@@ -7308,11 +7188,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -7326,8 +7206,8 @@ define void @test_vsoxseg3_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7340,8 +7220,8 @@ define void @test_vsoxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7356,7 +7236,6 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscal
 define void @test_vsoxseg3_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -7370,7 +7249,6 @@ entry:
 define void @test_vsoxseg3_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -7388,8 +7266,8 @@ define void @test_vsoxseg3_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7402,8 +7280,8 @@ define void @test_vsoxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7419,9 +7297,9 @@ define void @test_vsoxseg4_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7434,9 +7312,9 @@ define void @test_vsoxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7452,9 +7330,9 @@ define void @test_vsoxseg4_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7467,9 +7345,9 @@ define void @test_vsoxseg4_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7484,7 +7362,6 @@ declare void @llvm.riscv.vsoxseg4.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscal
 define void @test_vsoxseg4_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -7499,7 +7376,6 @@ entry:
 define void @test_vsoxseg4_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -7518,9 +7394,9 @@ define void @test_vsoxseg4_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7533,9 +7409,9 @@ define void @test_vsoxseg4_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7551,10 +7427,10 @@ define void @test_vsoxseg5_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7567,10 +7443,10 @@ define void @test_vsoxseg5_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7586,10 +7462,10 @@ define void @test_vsoxseg5_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7602,10 +7478,10 @@ define void @test_vsoxseg5_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7620,13 +7496,13 @@ declare void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscal
 define void @test_vsoxseg5_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsoxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT:    vsoxseg5ei64.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
@@ -7636,13 +7512,13 @@ entry:
 define void @test_vsoxseg5_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsoxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsoxseg5ei64.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -7656,10 +7532,10 @@ define void @test_vsoxseg5_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7672,10 +7548,10 @@ define void @test_vsoxseg5_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7691,11 +7567,11 @@ define void @test_vsoxseg6_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7708,11 +7584,11 @@ define void @test_vsoxseg6_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7728,11 +7604,11 @@ define void @test_vsoxseg6_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7745,11 +7621,11 @@ define void @test_vsoxseg6_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7765,11 +7641,11 @@ define void @test_vsoxseg6_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -7782,11 +7658,11 @@ define void @test_vsoxseg6_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -7802,11 +7678,11 @@ define void @test_vsoxseg6_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7819,11 +7695,11 @@ define void @test_vsoxseg6_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7839,12 +7715,12 @@ define void @test_vsoxseg7_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7857,12 +7733,12 @@ define void @test_vsoxseg7_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7878,12 +7754,12 @@ define void @test_vsoxseg7_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7896,12 +7772,12 @@ define void @test_vsoxseg7_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7917,12 +7793,12 @@ define void @test_vsoxseg7_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -7935,12 +7811,12 @@ define void @test_vsoxseg7_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -7956,12 +7832,12 @@ define void @test_vsoxseg7_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7974,12 +7850,12 @@ define void @test_vsoxseg7_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7995,13 +7871,13 @@ define void @test_vsoxseg8_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -8014,13 +7890,13 @@ define void @test_vsoxseg8_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -8036,13 +7912,13 @@ define void @test_vsoxseg8_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8055,13 +7931,13 @@ define void @test_vsoxseg8_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8077,13 +7953,13 @@ define void @test_vsoxseg8_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -8096,13 +7972,13 @@ define void @test_vsoxseg8_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -8118,13 +7994,13 @@ define void @test_vsoxseg8_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8137,13 +8013,13 @@ define void @test_vsoxseg8_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8158,7 +8034,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale
 define void @test_vsoxseg2_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
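
(A second recurring change starts here: the "# kill:" comments vanish. That comment is the printed form of a KILL pseudo the allocator previously inserted to mark the tuple super-register, e.g. $v8_v9, as defined when only $v8 actually carried a value; with liveness tracked per subregister that artificial super-register def is no longer needed. A sketch of the seg2 case, with the same caveats as above: the RUN line and @repro_seg2 are assumptions, and the CHECK lines mirror test_vsoxseg2_nxv1i8_nxv1i8 in this patch.)

; NOTE: illustrative sketch; RUN line is an assumption.
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s

declare void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8(<vscale x 1 x i8>, <vscale x 1 x i8>, ptr, <vscale x 1 x i8>, i64)

define void @repro_seg2(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
; No "# kill:" comment precedes the copies once subreg liveness is on;
; the index moves to v10 so the tuple can live in v8_v9.
; CHECK-LABEL: repro_seg2:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v10, v9
; CHECK-NEXT:    vmv1r.v v9, v8
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT:    vsoxseg2ei8.v v8, (a0), v10
entry:
  tail call void @llvm.riscv.vsoxseg2.nxv1i8.nxv1i8(<vscale x 1 x i8> %val, <vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
  ret void
}
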
@@ -8172,7 +8047,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8189,7 +8063,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale
 define void @test_vsoxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8203,7 +8076,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8220,7 +8092,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale
 define void @test_vsoxseg2_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8234,7 +8105,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8251,7 +8121,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x
 define void @test_vsoxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8265,7 +8134,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8283,8 +8151,8 @@ define void @test_vsoxseg3_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8297,8 +8165,8 @@ define void @test_vsoxseg3_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8314,8 +8182,8 @@ define void @test_vsoxseg3_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8328,8 +8196,8 @@ define void @test_vsoxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8345,8 +8213,8 @@ define void @test_vsoxseg3_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8359,8 +8227,8 @@ define void @test_vsoxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8376,8 +8244,8 @@ define void @test_vsoxseg3_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg3_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8390,8 +8258,8 @@ define void @test_vsoxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8407,9 +8275,9 @@ define void @test_vsoxseg4_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8422,9 +8290,9 @@ define void @test_vsoxseg4_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8440,9 +8308,9 @@ define void @test_vsoxseg4_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8455,9 +8323,9 @@ define void @test_vsoxseg4_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8473,9 +8341,9 @@ define void @test_vsoxseg4_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8488,9 +8356,9 @@ define void @test_vsoxseg4_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8506,9 +8374,9 @@ define void @test_vsoxseg4_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg4_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8521,9 +8389,9 @@ define void @test_vsoxseg4_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8539,10 +8407,10 @@ define void @test_vsoxseg5_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8555,10 +8423,10 @@ define void @test_vsoxseg5_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8574,10 +8442,10 @@ define void @test_vsoxseg5_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8590,10 +8458,10 @@ define void @test_vsoxseg5_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8609,10 +8477,10 @@ define void @test_vsoxseg5_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8625,10 +8493,10 @@ define void @test_vsoxseg5_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8644,10 +8512,10 @@ define void @test_vsoxseg5_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg5_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8660,10 +8528,10 @@ define void @test_vsoxseg5_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8679,11 +8547,11 @@ define void @test_vsoxseg6_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8696,11 +8564,11 @@ define void @test_vsoxseg6_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8716,11 +8584,11 @@ define void @test_vsoxseg6_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8733,11 +8601,11 @@ define void @test_vsoxseg6_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8753,11 +8621,11 @@ define void @test_vsoxseg6_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8770,11 +8638,11 @@ define void @test_vsoxseg6_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8790,11 +8658,11 @@ define void @test_vsoxseg6_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg6_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8807,11 +8675,11 @@ define void @test_vsoxseg6_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8827,12 +8695,12 @@ define void @test_vsoxseg7_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8845,12 +8713,12 @@ define void @test_vsoxseg7_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8866,12 +8734,12 @@ define void @test_vsoxseg7_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8884,12 +8752,12 @@ define void @test_vsoxseg7_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8905,12 +8773,12 @@ define void @test_vsoxseg7_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8923,12 +8791,12 @@ define void @test_vsoxseg7_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8944,12 +8812,12 @@ define void @test_vsoxseg7_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg7_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8962,12 +8830,12 @@ define void @test_vsoxseg7_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8983,13 +8851,13 @@ define void @test_vsoxseg8_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9002,13 +8870,13 @@ define void @test_vsoxseg8_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9024,13 +8892,13 @@ define void @test_vsoxseg8_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9043,13 +8911,13 @@ define void @test_vsoxseg8_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9065,13 +8933,13 @@ define void @test_vsoxseg8_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9084,13 +8952,13 @@ define void @test_vsoxseg8_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9106,13 +8974,13 @@ define void @test_vsoxseg8_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9125,13 +8993,13 @@ define void @test_vsoxseg8_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9146,7 +9014,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale
 define void @test_vsoxseg2_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -9160,7 +9027,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -9177,7 +9043,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x
 define void @test_vsoxseg2_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -9191,7 +9056,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -9208,7 +9072,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale
 define void @test_vsoxseg2_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -9222,7 +9085,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -9239,7 +9101,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale
 define void @test_vsoxseg2_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10
@@ -9252,7 +9113,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10, v0.t
@@ -9269,8 +9129,8 @@ define void @test_vsoxseg3_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9283,8 +9143,8 @@ define void @test_vsoxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9300,8 +9160,8 @@ define void @test_vsoxseg3_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9314,8 +9174,8 @@ define void @test_vsoxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9331,8 +9191,8 @@ define void @test_vsoxseg3_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9345,8 +9205,8 @@ define void @test_vsoxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9361,11 +9221,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale
 define void @test_vsoxseg3_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -9375,11 +9235,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -9393,9 +9253,9 @@ define void @test_vsoxseg4_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9408,9 +9268,9 @@ define void @test_vsoxseg4_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9426,9 +9286,9 @@ define void @test_vsoxseg4_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9441,9 +9301,9 @@ define void @test_vsoxseg4_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9459,9 +9319,9 @@ define void @test_vsoxseg4_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9474,9 +9334,9 @@ define void @test_vsoxseg4_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9492,9 +9352,9 @@ define void @test_vsoxseg4_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg4_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -9507,9 +9367,9 @@ define void @test_vsoxseg4_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -9525,10 +9385,10 @@ define void @test_vsoxseg5_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9541,10 +9401,10 @@ define void @test_vsoxseg5_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9560,10 +9420,10 @@ define void @test_vsoxseg5_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9576,10 +9436,10 @@ define void @test_vsoxseg5_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9595,10 +9455,10 @@ define void @test_vsoxseg5_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9611,10 +9471,10 @@ define void @test_vsoxseg5_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9630,10 +9490,10 @@ define void @test_vsoxseg5_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg5_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -9646,10 +9506,10 @@ define void @test_vsoxseg5_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -9665,11 +9525,11 @@ define void @test_vsoxseg6_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9682,11 +9542,11 @@ define void @test_vsoxseg6_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9702,11 +9562,11 @@ define void @test_vsoxseg6_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9719,11 +9579,11 @@ define void @test_vsoxseg6_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9739,11 +9599,11 @@ define void @test_vsoxseg6_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9756,11 +9616,11 @@ define void @test_vsoxseg6_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9776,11 +9636,11 @@ define void @test_vsoxseg6_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg6_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -9793,11 +9653,11 @@ define void @test_vsoxseg6_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -9813,12 +9673,12 @@ define void @test_vsoxseg7_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9831,12 +9691,12 @@ define void @test_vsoxseg7_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9852,12 +9712,12 @@ define void @test_vsoxseg7_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9870,12 +9730,12 @@ define void @test_vsoxseg7_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9891,12 +9751,12 @@ define void @test_vsoxseg7_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9909,12 +9769,12 @@ define void @test_vsoxseg7_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9930,12 +9790,12 @@ define void @test_vsoxseg7_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg7_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -9948,12 +9808,12 @@ define void @test_vsoxseg7_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -9969,13 +9829,13 @@ define void @test_vsoxseg8_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9988,13 +9848,13 @@ define void @test_vsoxseg8_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10010,13 +9870,13 @@ define void @test_vsoxseg8_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10029,13 +9889,13 @@ define void @test_vsoxseg8_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10051,13 +9911,13 @@ define void @test_vsoxseg8_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10070,13 +9930,13 @@ define void @test_vsoxseg8_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10092,13 +9952,13 @@ define void @test_vsoxseg8_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsoxseg8_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -10111,13 +9971,13 @@ define void @test_vsoxseg8_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -10132,7 +9992,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32>,<vscal
 define void @test_vsoxseg2_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -10146,7 +10005,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -10163,7 +10021,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32>,<vscale
 define void @test_vsoxseg2_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -10177,7 +10034,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -10194,7 +10050,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i64(<vscale x 8 x i32>,<vscal
 define void @test_vsoxseg2_nxv8i32_nxv8i64(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v16
@@ -10207,7 +10062,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i32_nxv8i64(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v16, v0.t
@@ -10223,7 +10077,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32>,<vscal
 define void @test_vsoxseg2_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -10237,7 +10090,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -10254,7 +10106,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8>,<vsca
 define void @test_vsoxseg2_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v16
@@ -10267,7 +10118,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei16.v v8, (a0), v16, v0.t
@@ -10283,7 +10133,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8>,<vscal
 define void @test_vsoxseg2_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -10297,7 +10146,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -10314,7 +10162,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscal
 define void @test_vsoxseg2_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -10328,7 +10175,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -10345,7 +10191,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale
 define void @test_vsoxseg2_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -10359,7 +10204,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -10376,7 +10220,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscal
 define void @test_vsoxseg2_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -10390,7 +10233,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -10407,7 +10249,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscal
 define void @test_vsoxseg2_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10
@@ -10420,7 +10261,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10, v0.t
@@ -10437,8 +10277,8 @@ define void @test_vsoxseg3_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10451,8 +10291,8 @@ define void @test_vsoxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10468,8 +10308,8 @@ define void @test_vsoxseg3_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10482,8 +10322,8 @@ define void @test_vsoxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10499,8 +10339,8 @@ define void @test_vsoxseg3_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10513,8 +10353,8 @@ define void @test_vsoxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10529,11 +10369,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscal
 define void @test_vsoxseg3_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -10543,11 +10383,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -10561,9 +10401,9 @@ define void @test_vsoxseg4_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10576,9 +10416,9 @@ define void @test_vsoxseg4_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10594,9 +10434,9 @@ define void @test_vsoxseg4_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10609,9 +10449,9 @@ define void @test_vsoxseg4_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10627,9 +10467,9 @@ define void @test_vsoxseg4_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10642,9 +10482,9 @@ define void @test_vsoxseg4_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10660,9 +10500,9 @@ define void @test_vsoxseg4_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -10675,9 +10515,9 @@ define void @test_vsoxseg4_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -10693,10 +10533,10 @@ define void @test_vsoxseg5_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10709,10 +10549,10 @@ define void @test_vsoxseg5_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10728,10 +10568,10 @@ define void @test_vsoxseg5_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10744,10 +10584,10 @@ define void @test_vsoxseg5_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10763,10 +10603,10 @@ define void @test_vsoxseg5_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10779,10 +10619,10 @@ define void @test_vsoxseg5_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10798,10 +10638,10 @@ define void @test_vsoxseg5_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -10814,10 +10654,10 @@ define void @test_vsoxseg5_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -10833,11 +10673,11 @@ define void @test_vsoxseg6_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10850,11 +10690,11 @@ define void @test_vsoxseg6_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10870,11 +10710,11 @@ define void @test_vsoxseg6_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10887,11 +10727,11 @@ define void @test_vsoxseg6_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10907,11 +10747,11 @@ define void @test_vsoxseg6_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10924,11 +10764,11 @@ define void @test_vsoxseg6_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10944,11 +10784,11 @@ define void @test_vsoxseg6_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -10961,11 +10801,11 @@ define void @test_vsoxseg6_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -10981,12 +10821,12 @@ define void @test_vsoxseg7_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10999,12 +10839,12 @@ define void @test_vsoxseg7_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11020,12 +10860,12 @@ define void @test_vsoxseg7_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11038,12 +10878,12 @@ define void @test_vsoxseg7_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11059,12 +10899,12 @@ define void @test_vsoxseg7_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11077,12 +10917,12 @@ define void @test_vsoxseg7_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11098,12 +10938,12 @@ define void @test_vsoxseg7_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11116,12 +10956,12 @@ define void @test_vsoxseg7_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11137,13 +10977,13 @@ define void @test_vsoxseg8_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11156,13 +10996,13 @@ define void @test_vsoxseg8_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11178,13 +11018,13 @@ define void @test_vsoxseg8_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11197,13 +11037,13 @@ define void @test_vsoxseg8_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11219,13 +11059,13 @@ define void @test_vsoxseg8_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11238,13 +11078,13 @@ define void @test_vsoxseg8_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11260,13 +11100,13 @@ define void @test_vsoxseg8_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11279,13 +11119,13 @@ define void @test_vsoxseg8_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11300,7 +11140,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscal
 define void @test_vsoxseg2_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11314,7 +11153,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11331,7 +11169,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i8(<vscale x 2 x i64>,<vscale
 define void @test_vsoxseg2_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11345,7 +11182,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11362,7 +11198,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscal
 define void @test_vsoxseg2_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11376,7 +11211,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11393,7 +11227,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2i64.nxv2i64(<vscale x 2 x i64>,<vscal
 define void @test_vsoxseg2_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11407,7 +11240,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11425,8 +11257,8 @@ define void @test_vsoxseg3_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11439,8 +11271,8 @@ define void @test_vsoxseg3_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11456,8 +11288,8 @@ define void @test_vsoxseg3_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11470,8 +11302,8 @@ define void @test_vsoxseg3_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11487,8 +11319,8 @@ define void @test_vsoxseg3_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11501,8 +11333,8 @@ define void @test_vsoxseg3_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11518,8 +11350,8 @@ define void @test_vsoxseg3_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11532,8 +11364,8 @@ define void @test_vsoxseg3_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11549,9 +11381,9 @@ define void @test_vsoxseg4_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11564,9 +11396,9 @@ define void @test_vsoxseg4_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11582,9 +11414,9 @@ define void @test_vsoxseg4_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11597,9 +11429,9 @@ define void @test_vsoxseg4_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11615,9 +11447,9 @@ define void @test_vsoxseg4_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11630,9 +11462,9 @@ define void @test_vsoxseg4_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11648,9 +11480,9 @@ define void @test_vsoxseg4_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11663,9 +11495,9 @@ define void @test_vsoxseg4_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11680,7 +11512,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half>,<v
 define void @test_vsoxseg2_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -11694,7 +11525,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -11711,7 +11541,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half>,<vs
 define void @test_vsoxseg2_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -11725,7 +11554,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -11742,7 +11570,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half>,<v
 define void @test_vsoxseg2_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16
@@ -11755,7 +11582,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v16, v0.t
@@ -11771,7 +11597,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double>,<vs
 define void @test_vsoxseg2_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11785,7 +11610,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11802,7 +11626,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double>,<vsc
 define void @test_vsoxseg2_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11816,7 +11639,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11833,7 +11655,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i64(<vscale x 4 x double>,<vs
 define void @test_vsoxseg2_nxv4f64_nxv4i64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11847,7 +11668,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f64_nxv4i64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11864,7 +11684,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double>,<vs
 define void @test_vsoxseg2_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11878,7 +11697,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11895,7 +11713,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vs
 define void @test_vsoxseg2_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -11909,7 +11726,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -11926,7 +11742,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vs
 define void @test_vsoxseg2_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -11940,7 +11755,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -11957,7 +11771,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vs
 define void @test_vsoxseg2_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -11971,7 +11784,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -11988,7 +11800,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vsc
 define void @test_vsoxseg2_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -12002,7 +11813,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -12020,8 +11830,8 @@ define void @test_vsoxseg3_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12034,8 +11844,8 @@ define void @test_vsoxseg3_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12051,8 +11861,8 @@ define void @test_vsoxseg3_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12065,8 +11875,8 @@ define void @test_vsoxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12082,8 +11892,8 @@ define void @test_vsoxseg3_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12096,8 +11906,8 @@ define void @test_vsoxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12113,8 +11923,8 @@ define void @test_vsoxseg3_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12127,8 +11937,8 @@ define void @test_vsoxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12144,9 +11954,9 @@ define void @test_vsoxseg4_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12159,9 +11969,9 @@ define void @test_vsoxseg4_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12177,9 +11987,9 @@ define void @test_vsoxseg4_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12192,9 +12002,9 @@ define void @test_vsoxseg4_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12210,9 +12020,9 @@ define void @test_vsoxseg4_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12225,9 +12035,9 @@ define void @test_vsoxseg4_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12243,9 +12053,9 @@ define void @test_vsoxseg4_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12258,9 +12068,9 @@ define void @test_vsoxseg4_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12276,10 +12086,10 @@ define void @test_vsoxseg5_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12292,10 +12102,10 @@ define void @test_vsoxseg5_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12311,10 +12121,10 @@ define void @test_vsoxseg5_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12327,10 +12137,10 @@ define void @test_vsoxseg5_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12346,10 +12156,10 @@ define void @test_vsoxseg5_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12362,10 +12172,10 @@ define void @test_vsoxseg5_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12381,10 +12191,10 @@ define void @test_vsoxseg5_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12397,10 +12207,10 @@ define void @test_vsoxseg5_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12416,11 +12226,11 @@ define void @test_vsoxseg6_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12433,11 +12243,11 @@ define void @test_vsoxseg6_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12453,11 +12263,11 @@ define void @test_vsoxseg6_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12470,11 +12280,11 @@ define void @test_vsoxseg6_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12490,11 +12300,11 @@ define void @test_vsoxseg6_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12507,11 +12317,11 @@ define void @test_vsoxseg6_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12527,11 +12337,11 @@ define void @test_vsoxseg6_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12544,11 +12354,11 @@ define void @test_vsoxseg6_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12564,12 +12374,12 @@ define void @test_vsoxseg7_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12582,12 +12392,12 @@ define void @test_vsoxseg7_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12603,12 +12413,12 @@ define void @test_vsoxseg7_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12621,12 +12431,12 @@ define void @test_vsoxseg7_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12642,12 +12452,12 @@ define void @test_vsoxseg7_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12660,12 +12470,12 @@ define void @test_vsoxseg7_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12681,12 +12491,12 @@ define void @test_vsoxseg7_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12699,12 +12509,12 @@ define void @test_vsoxseg7_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12720,13 +12530,13 @@ define void @test_vsoxseg8_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12739,13 +12549,13 @@ define void @test_vsoxseg8_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12761,13 +12571,13 @@ define void @test_vsoxseg8_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12780,13 +12590,13 @@ define void @test_vsoxseg8_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12802,13 +12612,13 @@ define void @test_vsoxseg8_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12821,13 +12631,13 @@ define void @test_vsoxseg8_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12843,13 +12653,13 @@ define void @test_vsoxseg8_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12862,13 +12672,13 @@ define void @test_vsoxseg8_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12883,7 +12693,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vsc
 define void @test_vsoxseg2_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -12897,7 +12706,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -12914,7 +12722,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vsca
 define void @test_vsoxseg2_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -12928,7 +12735,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -12945,7 +12751,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vsc
 define void @test_vsoxseg2_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -12959,7 +12764,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -12976,7 +12780,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f32.nxv2i64(<vscale x 2 x float>,<vsc
 define void @test_vsoxseg2_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10
@@ -12989,7 +12792,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10, v0.t
@@ -13006,8 +12808,8 @@ define void @test_vsoxseg3_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13020,8 +12822,8 @@ define void @test_vsoxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13037,8 +12839,8 @@ define void @test_vsoxseg3_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13051,8 +12853,8 @@ define void @test_vsoxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13068,8 +12870,8 @@ define void @test_vsoxseg3_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13082,8 +12884,8 @@ define void @test_vsoxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13098,11 +12900,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float>,<vsc
 define void @test_vsoxseg3_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -13112,11 +12914,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -13130,9 +12932,9 @@ define void @test_vsoxseg4_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13145,9 +12947,9 @@ define void @test_vsoxseg4_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13163,9 +12965,9 @@ define void @test_vsoxseg4_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13178,9 +12980,9 @@ define void @test_vsoxseg4_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13196,9 +12998,9 @@ define void @test_vsoxseg4_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13211,9 +13013,9 @@ define void @test_vsoxseg4_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13229,9 +13031,9 @@ define void @test_vsoxseg4_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13244,9 +13046,9 @@ define void @test_vsoxseg4_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13262,10 +13064,10 @@ define void @test_vsoxseg5_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13278,10 +13080,10 @@ define void @test_vsoxseg5_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13297,10 +13099,10 @@ define void @test_vsoxseg5_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13313,10 +13115,10 @@ define void @test_vsoxseg5_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13332,10 +13134,10 @@ define void @test_vsoxseg5_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13348,10 +13150,10 @@ define void @test_vsoxseg5_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13367,10 +13169,10 @@ define void @test_vsoxseg5_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13383,10 +13185,10 @@ define void @test_vsoxseg5_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13402,11 +13204,11 @@ define void @test_vsoxseg6_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13419,11 +13221,11 @@ define void @test_vsoxseg6_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13439,11 +13241,11 @@ define void @test_vsoxseg6_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13456,11 +13258,11 @@ define void @test_vsoxseg6_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13476,11 +13278,11 @@ define void @test_vsoxseg6_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13493,11 +13295,11 @@ define void @test_vsoxseg6_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13513,11 +13315,11 @@ define void @test_vsoxseg6_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13530,11 +13332,11 @@ define void @test_vsoxseg6_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13550,12 +13352,12 @@ define void @test_vsoxseg7_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13568,12 +13370,12 @@ define void @test_vsoxseg7_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13589,12 +13391,12 @@ define void @test_vsoxseg7_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13607,12 +13409,12 @@ define void @test_vsoxseg7_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13628,12 +13430,12 @@ define void @test_vsoxseg7_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13646,12 +13448,12 @@ define void @test_vsoxseg7_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13667,12 +13469,12 @@ define void @test_vsoxseg7_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13685,12 +13487,12 @@ define void @test_vsoxseg7_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13706,13 +13508,13 @@ define void @test_vsoxseg8_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13725,13 +13527,13 @@ define void @test_vsoxseg8_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13747,13 +13549,13 @@ define void @test_vsoxseg8_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13766,13 +13568,13 @@ define void @test_vsoxseg8_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13788,13 +13590,13 @@ define void @test_vsoxseg8_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13807,13 +13609,13 @@ define void @test_vsoxseg8_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13829,13 +13631,13 @@ define void @test_vsoxseg8_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13848,13 +13650,13 @@ define void @test_vsoxseg8_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13869,7 +13671,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vsca
 define void @test_vsoxseg2_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13883,7 +13684,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13900,7 +13700,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vsca
 define void @test_vsoxseg2_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13914,7 +13713,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13931,7 +13729,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vsca
 define void @test_vsoxseg2_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13945,7 +13742,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13962,7 +13758,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscal
 define void @test_vsoxseg2_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13976,7 +13771,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13994,8 +13788,8 @@ define void @test_vsoxseg3_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14008,8 +13802,8 @@ define void @test_vsoxseg3_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14025,8 +13819,8 @@ define void @test_vsoxseg3_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14039,8 +13833,8 @@ define void @test_vsoxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14056,8 +13850,8 @@ define void @test_vsoxseg3_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14070,8 +13864,8 @@ define void @test_vsoxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14087,8 +13881,8 @@ define void @test_vsoxseg3_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14101,8 +13895,8 @@ define void @test_vsoxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14118,9 +13912,9 @@ define void @test_vsoxseg4_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14133,9 +13927,9 @@ define void @test_vsoxseg4_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14151,9 +13945,9 @@ define void @test_vsoxseg4_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14166,9 +13960,9 @@ define void @test_vsoxseg4_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14184,9 +13978,9 @@ define void @test_vsoxseg4_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14199,9 +13993,9 @@ define void @test_vsoxseg4_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14217,9 +14011,9 @@ define void @test_vsoxseg4_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14232,9 +14026,9 @@ define void @test_vsoxseg4_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14250,10 +14044,10 @@ define void @test_vsoxseg5_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14266,10 +14060,10 @@ define void @test_vsoxseg5_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14285,10 +14079,10 @@ define void @test_vsoxseg5_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14301,10 +14095,10 @@ define void @test_vsoxseg5_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14320,10 +14114,10 @@ define void @test_vsoxseg5_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14336,10 +14130,10 @@ define void @test_vsoxseg5_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14355,10 +14149,10 @@ define void @test_vsoxseg5_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14371,10 +14165,10 @@ define void @test_vsoxseg5_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14390,11 +14184,11 @@ define void @test_vsoxseg6_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14407,11 +14201,11 @@ define void @test_vsoxseg6_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14427,11 +14221,11 @@ define void @test_vsoxseg6_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14444,11 +14238,11 @@ define void @test_vsoxseg6_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14464,11 +14258,11 @@ define void @test_vsoxseg6_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14481,11 +14275,11 @@ define void @test_vsoxseg6_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14501,11 +14295,11 @@ define void @test_vsoxseg6_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14518,11 +14312,11 @@ define void @test_vsoxseg6_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14538,12 +14332,12 @@ define void @test_vsoxseg7_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14556,12 +14350,12 @@ define void @test_vsoxseg7_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14577,12 +14371,12 @@ define void @test_vsoxseg7_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14595,12 +14389,12 @@ define void @test_vsoxseg7_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14616,12 +14410,12 @@ define void @test_vsoxseg7_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14634,12 +14428,12 @@ define void @test_vsoxseg7_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14655,12 +14449,12 @@ define void @test_vsoxseg7_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14673,12 +14467,12 @@ define void @test_vsoxseg7_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14694,13 +14488,13 @@ define void @test_vsoxseg8_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14713,13 +14507,13 @@ define void @test_vsoxseg8_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14735,13 +14529,13 @@ define void @test_vsoxseg8_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14754,13 +14548,13 @@ define void @test_vsoxseg8_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14776,13 +14570,13 @@ define void @test_vsoxseg8_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14795,13 +14589,13 @@ define void @test_vsoxseg8_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14817,13 +14611,13 @@ define void @test_vsoxseg8_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14836,13 +14630,13 @@ define void @test_vsoxseg8_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14857,7 +14651,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vsc
 define void @test_vsoxseg2_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14871,7 +14664,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14888,7 +14680,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vsc
 define void @test_vsoxseg2_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14902,7 +14693,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14919,7 +14709,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vsc
 define void @test_vsoxseg2_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14933,7 +14722,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14950,7 +14738,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vsca
 define void @test_vsoxseg2_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14964,7 +14751,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14982,8 +14768,8 @@ define void @test_vsoxseg3_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14996,8 +14782,8 @@ define void @test_vsoxseg3_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15013,8 +14799,8 @@ define void @test_vsoxseg3_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15027,8 +14813,8 @@ define void @test_vsoxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15044,8 +14830,8 @@ define void @test_vsoxseg3_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15058,8 +14844,8 @@ define void @test_vsoxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15075,8 +14861,8 @@ define void @test_vsoxseg3_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15089,8 +14875,8 @@ define void @test_vsoxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15106,9 +14892,9 @@ define void @test_vsoxseg4_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15121,9 +14907,9 @@ define void @test_vsoxseg4_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15139,9 +14925,9 @@ define void @test_vsoxseg4_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15154,9 +14940,9 @@ define void @test_vsoxseg4_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15172,9 +14958,9 @@ define void @test_vsoxseg4_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15187,9 +14973,9 @@ define void @test_vsoxseg4_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15205,9 +14991,9 @@ define void @test_vsoxseg4_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15220,9 +15006,9 @@ define void @test_vsoxseg4_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15238,10 +15024,10 @@ define void @test_vsoxseg5_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15254,10 +15040,10 @@ define void @test_vsoxseg5_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15273,10 +15059,10 @@ define void @test_vsoxseg5_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15289,10 +15075,10 @@ define void @test_vsoxseg5_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15308,10 +15094,10 @@ define void @test_vsoxseg5_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15324,10 +15110,10 @@ define void @test_vsoxseg5_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15343,10 +15129,10 @@ define void @test_vsoxseg5_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15359,10 +15145,10 @@ define void @test_vsoxseg5_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15378,11 +15164,11 @@ define void @test_vsoxseg6_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15395,11 +15181,11 @@ define void @test_vsoxseg6_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15415,11 +15201,11 @@ define void @test_vsoxseg6_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15432,11 +15218,11 @@ define void @test_vsoxseg6_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15452,11 +15238,11 @@ define void @test_vsoxseg6_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15469,11 +15255,11 @@ define void @test_vsoxseg6_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15489,11 +15275,11 @@ define void @test_vsoxseg6_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15506,11 +15292,11 @@ define void @test_vsoxseg6_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15526,12 +15312,12 @@ define void @test_vsoxseg7_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15544,12 +15330,12 @@ define void @test_vsoxseg7_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15565,12 +15351,12 @@ define void @test_vsoxseg7_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15583,12 +15369,12 @@ define void @test_vsoxseg7_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15604,12 +15390,12 @@ define void @test_vsoxseg7_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15622,12 +15408,12 @@ define void @test_vsoxseg7_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15643,12 +15429,12 @@ define void @test_vsoxseg7_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15661,12 +15447,12 @@ define void @test_vsoxseg7_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15682,13 +15468,13 @@ define void @test_vsoxseg8_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15701,13 +15487,13 @@ define void @test_vsoxseg8_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15723,13 +15509,13 @@ define void @test_vsoxseg8_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15742,13 +15528,13 @@ define void @test_vsoxseg8_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15764,13 +15550,13 @@ define void @test_vsoxseg8_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15783,13 +15569,13 @@ define void @test_vsoxseg8_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15805,13 +15591,13 @@ define void @test_vsoxseg8_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15824,13 +15610,13 @@ define void @test_vsoxseg8_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15845,7 +15631,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vsca
 define void @test_vsoxseg2_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -15859,7 +15644,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -15876,7 +15660,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscal
 define void @test_vsoxseg2_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -15890,7 +15673,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -15907,7 +15689,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i64(<vscale x 8 x half>,<vsca
 define void @test_vsoxseg2_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v16
@@ -15920,7 +15701,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v16, v0.t
@@ -15936,7 +15716,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vsca
 define void @test_vsoxseg2_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12
@@ -15949,7 +15728,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v12, v0.t
@@ -15966,8 +15744,8 @@ define void @test_vsoxseg3_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -15980,8 +15758,8 @@ define void @test_vsoxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -15997,8 +15775,8 @@ define void @test_vsoxseg3_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16011,8 +15789,8 @@ define void @test_vsoxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16027,7 +15805,6 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i64(<vscale x 8 x half>,<vsca
 define void @test_vsoxseg3_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -16041,7 +15818,6 @@ entry:
 define void @test_vsoxseg3_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -16058,11 +15834,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vsca
 define void @test_vsoxseg3_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
@@ -16072,11 +15848,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
@@ -16090,9 +15866,9 @@ define void @test_vsoxseg4_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16105,9 +15881,9 @@ define void @test_vsoxseg4_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16123,9 +15899,9 @@ define void @test_vsoxseg4_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16138,9 +15914,9 @@ define void @test_vsoxseg4_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16155,7 +15931,6 @@ declare void @llvm.riscv.vsoxseg4.mask.nxv8f16.nxv8i64(<vscale x 8 x half>,<vsca
 define void @test_vsoxseg4_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -16170,7 +15945,6 @@ entry:
 define void @test_vsoxseg4_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -16189,9 +15963,9 @@ define void @test_vsoxseg4_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -16204,9 +15978,9 @@ define void @test_vsoxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -16221,7 +15995,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float>,<vsc
 define void @test_vsoxseg2_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -16235,7 +16008,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -16252,7 +16024,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float>,<vsca
 define void @test_vsoxseg2_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -16266,7 +16037,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -16283,7 +16053,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i64(<vscale x 8 x float>,<vsc
 define void @test_vsoxseg2_nxv8f32_nxv8i64(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v16
@@ -16296,7 +16065,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8f32_nxv8i64(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v16, v0.t
@@ -16312,7 +16080,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float>,<vsc
 define void @test_vsoxseg2_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -16326,7 +16093,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -16343,7 +16109,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vs
 define void @test_vsoxseg2_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16357,7 +16122,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16374,7 +16138,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vsc
 define void @test_vsoxseg2_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16388,7 +16151,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16405,7 +16167,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vs
 define void @test_vsoxseg2_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16419,7 +16180,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16436,7 +16196,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f64.nxv2i64(<vscale x 2 x double>,<vs
 define void @test_vsoxseg2_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16450,7 +16209,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16468,8 +16226,8 @@ define void @test_vsoxseg3_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16482,8 +16240,8 @@ define void @test_vsoxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16499,8 +16257,8 @@ define void @test_vsoxseg3_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16513,8 +16271,8 @@ define void @test_vsoxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16530,8 +16288,8 @@ define void @test_vsoxseg3_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16544,8 +16302,8 @@ define void @test_vsoxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16561,8 +16319,8 @@ define void @test_vsoxseg3_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg3_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16575,8 +16333,8 @@ define void @test_vsoxseg3_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16592,9 +16350,9 @@ define void @test_vsoxseg4_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16607,9 +16365,9 @@ define void @test_vsoxseg4_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16625,9 +16383,9 @@ define void @test_vsoxseg4_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16640,9 +16398,9 @@ define void @test_vsoxseg4_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16658,9 +16416,9 @@ define void @test_vsoxseg4_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16673,9 +16431,9 @@ define void @test_vsoxseg4_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16691,9 +16449,9 @@ define void @test_vsoxseg4_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsoxseg4_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16706,9 +16464,9 @@ define void @test_vsoxseg4_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16723,7 +16481,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vsca
 define void @test_vsoxseg2_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10
@@ -16736,7 +16493,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei32.v v8, (a0), v10, v0.t
@@ -16752,7 +16508,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscal
 define void @test_vsoxseg2_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -16766,7 +16521,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -16783,7 +16537,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vsca
 define void @test_vsoxseg2_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v12
@@ -16796,7 +16549,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v12, v0.t
@@ -16812,7 +16564,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vsca
 define void @test_vsoxseg2_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -16826,7 +16577,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -16843,11 +16593,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vsca
 define void @test_vsoxseg3_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
@@ -16857,11 +16607,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsoxseg3ei32.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -16875,8 +16625,8 @@ define void @test_vsoxseg3_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -16889,8 +16639,8 @@ define void @test_vsoxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -16905,7 +16655,6 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vsca
 define void @test_vsoxseg3_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -16919,7 +16668,6 @@ entry:
 define void @test_vsoxseg3_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -16937,8 +16685,8 @@ define void @test_vsoxseg3_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -16951,8 +16699,8 @@ define void @test_vsoxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -16968,9 +16716,9 @@ define void @test_vsoxseg4_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16983,9 +16731,9 @@ define void @test_vsoxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -17001,9 +16749,9 @@ define void @test_vsoxseg4_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17016,9 +16764,9 @@ define void @test_vsoxseg4_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17033,7 +16781,6 @@ declare void @llvm.riscv.vsoxseg4.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vsca
 define void @test_vsoxseg4_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -17048,7 +16795,6 @@ entry:
 define void @test_vsoxseg4_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -17067,9 +16813,9 @@ define void @test_vsoxseg4_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17082,9 +16828,9 @@ define void @test_vsoxseg4_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17100,10 +16846,10 @@ define void @test_vsoxseg5_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -17116,10 +16862,10 @@ define void @test_vsoxseg5_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -17135,10 +16881,10 @@ define void @test_vsoxseg5_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17151,10 +16897,10 @@ define void @test_vsoxseg5_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17169,13 +16915,13 @@ declare void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vsca
 define void @test_vsoxseg5_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsoxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT:    vsoxseg5ei64.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
@@ -17185,13 +16931,13 @@ entry:
 define void @test_vsoxseg5_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsoxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsoxseg5ei64.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -17205,10 +16951,10 @@ define void @test_vsoxseg5_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17221,10 +16967,10 @@ define void @test_vsoxseg5_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17240,11 +16986,11 @@ define void @test_vsoxseg6_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -17257,11 +17003,11 @@ define void @test_vsoxseg6_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -17277,11 +17023,11 @@ define void @test_vsoxseg6_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17294,11 +17040,11 @@ define void @test_vsoxseg6_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17314,11 +17060,11 @@ define void @test_vsoxseg6_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -17331,11 +17077,11 @@ define void @test_vsoxseg6_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -17351,11 +17097,11 @@ define void @test_vsoxseg6_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17368,11 +17114,11 @@ define void @test_vsoxseg6_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17388,12 +17134,12 @@ define void @test_vsoxseg7_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -17406,12 +17152,12 @@ define void @test_vsoxseg7_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -17427,12 +17173,12 @@ define void @test_vsoxseg7_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17445,12 +17191,12 @@ define void @test_vsoxseg7_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17466,12 +17212,12 @@ define void @test_vsoxseg7_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -17484,12 +17230,12 @@ define void @test_vsoxseg7_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -17505,12 +17251,12 @@ define void @test_vsoxseg7_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17523,12 +17269,12 @@ define void @test_vsoxseg7_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17544,13 +17290,13 @@ define void @test_vsoxseg8_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -17563,13 +17309,13 @@ define void @test_vsoxseg8_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -17585,13 +17331,13 @@ define void @test_vsoxseg8_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17604,13 +17350,13 @@ define void @test_vsoxseg8_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17626,13 +17372,13 @@ define void @test_vsoxseg8_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -17645,13 +17391,13 @@ define void @test_vsoxseg8_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -17667,13 +17413,13 @@ define void @test_vsoxseg8_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17686,13 +17432,13 @@ define void @test_vsoxseg8_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17707,7 +17453,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vsca
 define void @test_vsoxseg2_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -17721,7 +17466,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -17738,7 +17482,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscal
 define void @test_vsoxseg2_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -17752,7 +17495,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -17769,7 +17511,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vsca
 define void @test_vsoxseg2_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -17783,7 +17524,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -17800,7 +17540,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv2f16.nxv2i64(<vscale x 2 x half>,<vsca
 define void @test_vsoxseg2_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10
@@ -17813,7 +17552,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v10, v0.t
@@ -17830,8 +17568,8 @@ define void @test_vsoxseg3_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17844,8 +17582,8 @@ define void @test_vsoxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17861,8 +17599,8 @@ define void @test_vsoxseg3_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17875,8 +17613,8 @@ define void @test_vsoxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17892,8 +17630,8 @@ define void @test_vsoxseg3_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17906,8 +17644,8 @@ define void @test_vsoxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17922,11 +17660,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half>,<vsca
 define void @test_vsoxseg3_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -17936,11 +17674,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -17954,9 +17692,9 @@ define void @test_vsoxseg4_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17969,9 +17707,9 @@ define void @test_vsoxseg4_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17987,9 +17725,9 @@ define void @test_vsoxseg4_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18002,9 +17740,9 @@ define void @test_vsoxseg4_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18020,9 +17758,9 @@ define void @test_vsoxseg4_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18035,9 +17773,9 @@ define void @test_vsoxseg4_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18053,9 +17791,9 @@ define void @test_vsoxseg4_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18068,9 +17806,9 @@ define void @test_vsoxseg4_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18086,10 +17824,10 @@ define void @test_vsoxseg5_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18102,10 +17840,10 @@ define void @test_vsoxseg5_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18121,10 +17859,10 @@ define void @test_vsoxseg5_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18137,10 +17875,10 @@ define void @test_vsoxseg5_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18156,10 +17894,10 @@ define void @test_vsoxseg5_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18172,10 +17910,10 @@ define void @test_vsoxseg5_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18191,10 +17929,10 @@ define void @test_vsoxseg5_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg5_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18207,10 +17945,10 @@ define void @test_vsoxseg5_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg5_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18226,11 +17964,11 @@ define void @test_vsoxseg6_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18243,11 +17981,11 @@ define void @test_vsoxseg6_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18263,11 +18001,11 @@ define void @test_vsoxseg6_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18280,11 +18018,11 @@ define void @test_vsoxseg6_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18300,11 +18038,11 @@ define void @test_vsoxseg6_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18317,11 +18055,11 @@ define void @test_vsoxseg6_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18337,11 +18075,11 @@ define void @test_vsoxseg6_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg6_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18354,11 +18092,11 @@ define void @test_vsoxseg6_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg6_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18374,12 +18112,12 @@ define void @test_vsoxseg7_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18392,12 +18130,12 @@ define void @test_vsoxseg7_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18413,12 +18151,12 @@ define void @test_vsoxseg7_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18431,12 +18169,12 @@ define void @test_vsoxseg7_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18452,12 +18190,12 @@ define void @test_vsoxseg7_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18470,12 +18208,12 @@ define void @test_vsoxseg7_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18491,12 +18229,12 @@ define void @test_vsoxseg7_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg7_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18509,12 +18247,12 @@ define void @test_vsoxseg7_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg7_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18530,13 +18268,13 @@ define void @test_vsoxseg8_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18549,13 +18287,13 @@ define void @test_vsoxseg8_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18571,13 +18309,13 @@ define void @test_vsoxseg8_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18590,13 +18328,13 @@ define void @test_vsoxseg8_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18612,13 +18350,13 @@ define void @test_vsoxseg8_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18631,13 +18369,13 @@ define void @test_vsoxseg8_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18653,13 +18391,13 @@ define void @test_vsoxseg8_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg8_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18672,13 +18410,13 @@ define void @test_vsoxseg8_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg8_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsoxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18693,7 +18431,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vsc
 define void @test_vsoxseg2_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -18707,7 +18444,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -18724,7 +18460,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vsca
 define void @test_vsoxseg2_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -18738,7 +18473,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -18755,7 +18489,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i64(<vscale x 4 x float>,<vsc
 define void @test_vsoxseg2_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v12
@@ -18768,7 +18501,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg2ei64.v v8, (a0), v12, v0.t
@@ -18784,7 +18516,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vsc
 define void @test_vsoxseg2_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -18798,7 +18529,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -18816,8 +18546,8 @@ define void @test_vsoxseg3_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18830,8 +18560,8 @@ define void @test_vsoxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18847,8 +18577,8 @@ define void @test_vsoxseg3_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18861,8 +18591,8 @@ define void @test_vsoxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18877,11 +18607,11 @@ declare void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float>,<vsc
 define void @test_vsoxseg3_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v16, (a0), v12
+; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
@@ -18891,11 +18621,11 @@ entry:
 define void @test_vsoxseg3_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsoxseg3ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsoxseg3ei64.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsoxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -18909,8 +18639,8 @@ define void @test_vsoxseg3_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg3_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18923,8 +18653,8 @@ define void @test_vsoxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg3_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18940,9 +18670,9 @@ define void @test_vsoxseg4_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18955,9 +18685,9 @@ define void @test_vsoxseg4_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18973,9 +18703,9 @@ define void @test_vsoxseg4_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18988,9 +18718,9 @@ define void @test_vsoxseg4_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -19006,9 +18736,9 @@ define void @test_vsoxseg4_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -19021,9 +18751,9 @@ define void @test_vsoxseg4_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -19039,9 +18769,9 @@ define void @test_vsoxseg4_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsoxseg4_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -19054,9 +18784,9 @@ define void @test_vsoxseg4_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsoxseg4_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsoxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll
index f3ac92670e835..0cdef07b01109 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv32.ll
@@ -8,7 +8,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x
 define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -21,7 +20,6 @@ entry:
 define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -37,7 +35,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,
 define void @test_vsseg2_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0)
@@ -50,7 +47,6 @@ entry:
 define void @test_vsseg2_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
@@ -66,7 +62,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,
 define void @test_vsseg3_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -80,7 +75,6 @@ entry:
 define void @test_vsseg3_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -97,7 +91,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,
 define void @test_vsseg4_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -112,7 +105,6 @@ entry:
 define void @test_vsseg4_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -130,7 +122,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,
 define void @test_vsseg5_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -146,7 +137,6 @@ entry:
 define void @test_vsseg5_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -165,7 +155,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,
 define void @test_vsseg6_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -182,7 +171,6 @@ entry:
 define void @test_vsseg6_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -202,7 +190,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,
 define void @test_vsseg7_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -220,7 +207,6 @@ entry:
 define void @test_vsseg7_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -241,7 +227,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,
 define void @test_vsseg8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -260,7 +245,6 @@ entry:
 define void @test_vsseg8_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -282,7 +266,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i
 define void @test_vsseg2_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0)
@@ -295,7 +278,6 @@ entry:
 define void @test_vsseg2_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
@@ -311,7 +293,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i
 define void @test_vsseg3_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -325,7 +306,6 @@ entry:
 define void @test_vsseg3_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -342,7 +322,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i
 define void @test_vsseg4_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -357,7 +336,6 @@ entry:
 define void @test_vsseg4_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -375,7 +353,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i3
 define void @test_vsseg2_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -388,7 +365,6 @@ entry:
 define void @test_vsseg2_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -404,7 +380,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i3
 define void @test_vsseg3_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -418,7 +393,6 @@ entry:
 define void @test_vsseg3_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -435,7 +409,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i3
 define void @test_vsseg4_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -450,7 +423,6 @@ entry:
 define void @test_vsseg4_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -468,7 +440,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i3
 define void @test_vsseg5_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -484,7 +455,6 @@ entry:
 define void @test_vsseg5_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -503,7 +473,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i3
 define void @test_vsseg6_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -520,7 +489,6 @@ entry:
 define void @test_vsseg6_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -540,7 +508,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i3
 define void @test_vsseg7_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -558,7 +525,6 @@ entry:
 define void @test_vsseg7_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -579,7 +545,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i3
 define void @test_vsseg8_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -598,7 +563,6 @@ entry:
 define void @test_vsseg8_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -620,7 +584,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1
 define void @test_vsseg2_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -633,7 +596,6 @@ entry:
 define void @test_vsseg2_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -649,7 +611,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1
 define void @test_vsseg3_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -663,7 +624,6 @@ entry:
 define void @test_vsseg3_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -680,7 +640,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1
 define void @test_vsseg4_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -695,7 +654,6 @@ entry:
 define void @test_vsseg4_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -713,7 +671,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1
 define void @test_vsseg5_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -729,7 +686,6 @@ entry:
 define void @test_vsseg5_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -748,7 +704,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1
 define void @test_vsseg6_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -765,7 +720,6 @@ entry:
 define void @test_vsseg6_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -785,7 +739,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1
 define void @test_vsseg7_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -803,7 +756,6 @@ entry:
 define void @test_vsseg7_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -824,7 +776,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1
 define void @test_vsseg8_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -843,7 +794,6 @@ entry:
 define void @test_vsseg8_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -865,7 +815,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i3
 define void @test_vsseg2_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -878,7 +827,6 @@ entry:
 define void @test_vsseg2_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -894,7 +842,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i3
 define void @test_vsseg3_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -908,7 +855,6 @@ entry:
 define void @test_vsseg3_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -925,7 +871,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i3
 define void @test_vsseg4_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -940,7 +885,6 @@ entry:
 define void @test_vsseg4_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -958,7 +902,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i3
 define void @test_vsseg5_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -974,7 +917,6 @@ entry:
 define void @test_vsseg5_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -993,7 +935,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i3
 define void @test_vsseg6_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1010,7 +951,6 @@ entry:
 define void @test_vsseg6_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1030,7 +970,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i3
 define void @test_vsseg7_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1048,7 +987,6 @@ entry:
 define void @test_vsseg7_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1069,7 +1007,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i3
 define void @test_vsseg8_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1088,7 +1025,6 @@ entry:
 define void @test_vsseg8_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1110,7 +1046,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i1
 define void @test_vsseg2_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -1123,7 +1058,6 @@ entry:
 define void @test_vsseg2_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -1139,7 +1073,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i1
 define void @test_vsseg3_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1153,7 +1086,6 @@ entry:
 define void @test_vsseg3_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -1170,7 +1102,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i1
 define void @test_vsseg4_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -1185,7 +1116,6 @@ entry:
 define void @test_vsseg4_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -1203,7 +1133,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,
 define void @test_vsseg2_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0)
@@ -1216,7 +1145,6 @@ entry:
 define void @test_vsseg2_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
@@ -1232,7 +1160,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,
 define void @test_vsseg3_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1246,7 +1173,6 @@ entry:
 define void @test_vsseg3_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1263,7 +1189,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,
 define void @test_vsseg4_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1278,7 +1203,6 @@ entry:
 define void @test_vsseg4_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1296,7 +1220,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,
 define void @test_vsseg5_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1312,7 +1235,6 @@ entry:
 define void @test_vsseg5_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1331,7 +1253,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,
 define void @test_vsseg6_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1348,7 +1269,6 @@ entry:
 define void @test_vsseg6_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1368,7 +1288,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,
 define void @test_vsseg7_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1386,7 +1305,6 @@ entry:
 define void @test_vsseg7_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1407,7 +1325,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,
 define void @test_vsseg8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1426,7 +1343,6 @@ entry:
 define void @test_vsseg8_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1448,7 +1364,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i3
 define void @test_vsseg2_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -1461,7 +1376,6 @@ entry:
 define void @test_vsseg2_mask_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -1477,7 +1391,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,
 define void @test_vsseg2_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0)
@@ -1490,7 +1403,6 @@ entry:
 define void @test_vsseg2_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
@@ -1506,7 +1418,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,
 define void @test_vsseg3_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1520,7 +1431,6 @@ entry:
 define void @test_vsseg3_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -1537,7 +1447,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,
 define void @test_vsseg4_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1552,7 +1461,6 @@ entry:
 define void @test_vsseg4_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1570,7 +1478,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,
 define void @test_vsseg5_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1586,7 +1493,6 @@ entry:
 define void @test_vsseg5_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1605,7 +1511,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,
 define void @test_vsseg6_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1622,7 +1527,6 @@ entry:
 define void @test_vsseg6_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1642,7 +1546,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,
 define void @test_vsseg7_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1660,7 +1563,6 @@ entry:
 define void @test_vsseg7_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1681,7 +1583,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,
 define void @test_vsseg8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1700,7 +1601,6 @@ entry:
 define void @test_vsseg8_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1722,7 +1622,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i1
 define void @test_vsseg2_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -1735,7 +1634,6 @@ entry:
 define void @test_vsseg2_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -1751,7 +1649,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i1
 define void @test_vsseg3_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1765,7 +1662,6 @@ entry:
 define void @test_vsseg3_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1782,7 +1678,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i1
 define void @test_vsseg4_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1797,7 +1692,6 @@ entry:
 define void @test_vsseg4_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1815,7 +1709,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i1
 define void @test_vsseg5_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1831,7 +1724,6 @@ entry:
 define void @test_vsseg5_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1850,7 +1742,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i1
 define void @test_vsseg6_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1867,7 +1758,6 @@ entry:
 define void @test_vsseg6_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1887,7 +1777,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i1
 define void @test_vsseg7_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1905,7 +1794,6 @@ entry:
 define void @test_vsseg7_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1926,7 +1814,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i1
 define void @test_vsseg8_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1945,7 +1832,6 @@ entry:
 define void @test_vsseg8_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1967,7 +1853,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i
 define void @test_vsseg2_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0)
@@ -1980,7 +1865,6 @@ entry:
 define void @test_vsseg2_mask_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
@@ -1996,7 +1880,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,
 define void @test_vsseg2_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0)
@@ -2009,7 +1892,6 @@ entry:
 define void @test_vsseg2_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
@@ -2025,7 +1907,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,
 define void @test_vsseg3_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -2039,7 +1920,6 @@ entry:
 define void @test_vsseg3_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -2056,7 +1936,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,
 define void @test_vsseg4_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2071,7 +1950,6 @@ entry:
 define void @test_vsseg4_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2089,7 +1967,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,
 define void @test_vsseg5_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2105,7 +1982,6 @@ entry:
 define void @test_vsseg5_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2124,7 +2000,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,
 define void @test_vsseg6_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2141,7 +2016,6 @@ entry:
 define void @test_vsseg6_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2161,7 +2035,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,
 define void @test_vsseg7_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2179,7 +2052,6 @@ entry:
 define void @test_vsseg7_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2200,7 +2072,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,
 define void @test_vsseg8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2219,7 +2090,6 @@ entry:
 define void @test_vsseg8_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2241,7 +2111,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i1
 define void @test_vsseg2_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -2254,7 +2123,6 @@ entry:
 define void @test_vsseg2_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -2270,7 +2138,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i1
 define void @test_vsseg3_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -2284,7 +2151,6 @@ entry:
 define void @test_vsseg3_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -2301,7 +2167,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i1
 define void @test_vsseg4_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2316,7 +2181,6 @@ entry:
 define void @test_vsseg4_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2334,7 +2198,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i1
 define void @test_vsseg5_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2350,7 +2213,6 @@ entry:
 define void @test_vsseg5_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2369,7 +2231,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i1
 define void @test_vsseg6_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2386,7 +2247,6 @@ entry:
 define void @test_vsseg6_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2406,7 +2266,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i1
 define void @test_vsseg7_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2424,7 +2283,6 @@ entry:
 define void @test_vsseg7_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2445,7 +2303,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i1
 define void @test_vsseg8_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2464,7 +2321,6 @@ entry:
 define void @test_vsseg8_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2486,7 +2342,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i3
 define void @test_vsseg2_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -2499,7 +2354,6 @@ entry:
 define void @test_vsseg2_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -2515,7 +2369,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i3
 define void @test_vsseg3_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2529,7 +2382,6 @@ entry:
 define void @test_vsseg3_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -2546,7 +2398,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i3
 define void @test_vsseg4_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -2561,7 +2412,6 @@ entry:
 define void @test_vsseg4_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -2579,7 +2429,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16
 define void @test_vsseg2_nxv16f16(<vscale x 16 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -2592,7 +2441,6 @@ entry:
 define void @test_vsseg2_mask_nxv16f16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -2608,7 +2456,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x
 define void @test_vsseg2_nxv4f64(<vscale x 4 x double> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0)
@@ -2621,7 +2468,6 @@ entry:
 define void @test_vsseg2_mask_nxv4f64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
@@ -2637,7 +2483,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x
 define void @test_vsseg2_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0)
@@ -2650,7 +2495,6 @@ entry:
 define void @test_vsseg2_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
@@ -2666,7 +2510,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x
 define void @test_vsseg3_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2680,7 +2523,6 @@ entry:
 define void @test_vsseg3_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -2697,7 +2539,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x
 define void @test_vsseg4_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2712,7 +2553,6 @@ entry:
 define void @test_vsseg4_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2730,7 +2570,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x
 define void @test_vsseg5_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2746,7 +2585,6 @@ entry:
 define void @test_vsseg5_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2765,7 +2603,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x
 define void @test_vsseg6_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2782,7 +2619,6 @@ entry:
 define void @test_vsseg6_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2802,7 +2638,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x
 define void @test_vsseg7_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2820,7 +2655,6 @@ entry:
 define void @test_vsseg7_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2841,7 +2675,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x
 define void @test_vsseg8_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2860,7 +2693,6 @@ entry:
 define void @test_vsseg8_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2882,7 +2714,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vsseg2_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -2895,7 +2726,6 @@ entry:
 define void @test_vsseg2_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -2911,7 +2741,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vsseg3_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2925,7 +2754,6 @@ entry:
 define void @test_vsseg3_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -2942,7 +2770,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vsseg4_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2957,7 +2784,6 @@ entry:
 define void @test_vsseg4_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2975,7 +2801,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vsseg5_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2991,7 +2816,6 @@ entry:
 define void @test_vsseg5_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3010,7 +2834,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vsseg6_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3027,7 +2850,6 @@ entry:
 define void @test_vsseg6_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3047,7 +2869,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vsseg7_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3065,7 +2886,6 @@ entry:
 define void @test_vsseg7_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3086,7 +2906,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vsseg8_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3105,7 +2924,6 @@ entry:
 define void @test_vsseg8_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3127,7 +2945,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x h
 define void @test_vsseg2_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -3140,7 +2957,6 @@ entry:
 define void @test_vsseg2_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -3156,7 +2972,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x h
 define void @test_vsseg3_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -3170,7 +2985,6 @@ entry:
 define void @test_vsseg3_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -3187,7 +3001,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x h
 define void @test_vsseg4_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3202,7 +3015,6 @@ entry:
 define void @test_vsseg4_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3220,7 +3032,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x h
 define void @test_vsseg5_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3236,7 +3047,6 @@ entry:
 define void @test_vsseg5_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3255,7 +3065,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x h
 define void @test_vsseg6_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3272,7 +3081,6 @@ entry:
 define void @test_vsseg6_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3292,7 +3100,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x h
 define void @test_vsseg7_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3310,7 +3117,6 @@ entry:
 define void @test_vsseg7_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3331,7 +3137,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x h
 define void @test_vsseg8_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3350,7 +3155,6 @@ entry:
 define void @test_vsseg8_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3372,7 +3176,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vsseg2_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -3385,7 +3188,6 @@ entry:
 define void @test_vsseg2_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -3401,7 +3203,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vsseg3_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -3415,7 +3216,6 @@ entry:
 define void @test_vsseg3_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -3432,7 +3232,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vsseg4_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3447,7 +3246,6 @@ entry:
 define void @test_vsseg4_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3465,7 +3263,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vsseg5_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3481,7 +3278,6 @@ entry:
 define void @test_vsseg5_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3500,7 +3296,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vsseg6_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3517,7 +3312,6 @@ entry:
 define void @test_vsseg6_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3537,7 +3331,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vsseg7_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3555,7 +3348,6 @@ entry:
 define void @test_vsseg7_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3576,7 +3368,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vsseg8_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3595,7 +3386,6 @@ entry:
 define void @test_vsseg8_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3617,7 +3407,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x h
 define void @test_vsseg2_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -3630,7 +3419,6 @@ entry:
 define void @test_vsseg2_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -3646,7 +3434,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x h
 define void @test_vsseg3_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3660,7 +3447,6 @@ entry:
 define void @test_vsseg3_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3677,7 +3463,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x h
 define void @test_vsseg4_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -3692,7 +3477,6 @@ entry:
 define void @test_vsseg4_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -3710,7 +3494,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x
 define void @test_vsseg2_nxv8f32(<vscale x 8 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -3723,7 +3506,6 @@ entry:
 define void @test_vsseg2_mask_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -3739,7 +3521,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x
 define void @test_vsseg2_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0)
@@ -3752,7 +3533,6 @@ entry:
 define void @test_vsseg2_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
@@ -3768,7 +3548,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x
 define void @test_vsseg3_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -3782,7 +3561,6 @@ entry:
 define void @test_vsseg3_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -3799,7 +3577,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x
 define void @test_vsseg4_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -3814,7 +3591,6 @@ entry:
 define void @test_vsseg4_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -3832,7 +3608,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x h
 define void @test_vsseg2_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -3845,7 +3620,6 @@ entry:
 define void @test_vsseg2_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -3861,7 +3635,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x h
 define void @test_vsseg3_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -3875,7 +3648,6 @@ entry:
 define void @test_vsseg3_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -3892,7 +3664,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x h
 define void @test_vsseg4_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3907,7 +3678,6 @@ entry:
 define void @test_vsseg4_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3925,7 +3695,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x h
 define void @test_vsseg5_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3941,7 +3710,6 @@ entry:
 define void @test_vsseg5_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3960,7 +3728,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x h
 define void @test_vsseg6_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3977,7 +3744,6 @@ entry:
 define void @test_vsseg6_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3997,7 +3763,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x h
 define void @test_vsseg7_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4015,7 +3780,6 @@ entry:
 define void @test_vsseg7_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4036,7 +3800,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x h
 define void @test_vsseg8_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4055,7 +3818,6 @@ entry:
 define void @test_vsseg8_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4077,7 +3839,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x h
 define void @test_vsseg2_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -4090,7 +3851,6 @@ entry:
 define void @test_vsseg2_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -4106,7 +3866,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x h
 define void @test_vsseg3_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -4120,7 +3879,6 @@ entry:
 define void @test_vsseg3_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -4137,7 +3895,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x h
 define void @test_vsseg4_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4152,7 +3909,6 @@ entry:
 define void @test_vsseg4_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4170,7 +3926,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x h
 define void @test_vsseg5_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4186,7 +3941,6 @@ entry:
 define void @test_vsseg5_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4205,7 +3959,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x h
 define void @test_vsseg6_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4222,7 +3975,6 @@ entry:
 define void @test_vsseg6_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4242,7 +3994,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x h
 define void @test_vsseg7_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4260,7 +4011,6 @@ entry:
 define void @test_vsseg7_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4281,7 +4031,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x h
 define void @test_vsseg8_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4300,7 +4049,6 @@ entry:
 define void @test_vsseg8_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4322,7 +4070,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x
 define void @test_vsseg2_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -4335,7 +4082,6 @@ entry:
 define void @test_vsseg2_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -4351,7 +4097,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x
 define void @test_vsseg3_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -4365,7 +4110,6 @@ entry:
 define void @test_vsseg3_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -4382,7 +4126,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x
 define void @test_vsseg4_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -4397,7 +4140,6 @@ entry:
 define void @test_vsseg4_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll
index 293bdbce00acf..3031cb4abc053 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsseg-rv64.ll
@@ -8,7 +8,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x
 define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -21,7 +20,6 @@ entry:
 define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -37,7 +35,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i3
 define void @test_vsseg2_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -50,7 +47,6 @@ entry:
 define void @test_vsseg2_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -66,7 +62,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i3
 define void @test_vsseg3_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -80,7 +75,6 @@ entry:
 define void @test_vsseg3_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -97,7 +91,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i3
 define void @test_vsseg4_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -112,7 +105,6 @@ entry:
 define void @test_vsseg4_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -130,7 +122,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i
 define void @test_vsseg2_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0)
@@ -143,7 +134,6 @@ entry:
 define void @test_vsseg2_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
@@ -159,7 +149,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i
 define void @test_vsseg3_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -173,7 +162,6 @@ entry:
 define void @test_vsseg3_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -190,7 +178,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x i
 define void @test_vsseg4_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -205,7 +192,6 @@ entry:
 define void @test_vsseg4_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -223,7 +209,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i6
 define void @test_vsseg2_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0)
@@ -236,7 +221,6 @@ entry:
 define void @test_vsseg2_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
@@ -252,7 +236,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i6
 define void @test_vsseg3_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -266,7 +249,6 @@ entry:
 define void @test_vsseg3_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -283,7 +265,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i6
 define void @test_vsseg4_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -298,7 +279,6 @@ entry:
 define void @test_vsseg4_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -316,7 +296,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i6
 define void @test_vsseg5_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -332,7 +311,6 @@ entry:
 define void @test_vsseg5_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -351,7 +329,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i6
 define void @test_vsseg6_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -368,7 +345,6 @@ entry:
 define void @test_vsseg6_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -388,7 +364,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i6
 define void @test_vsseg7_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -406,7 +381,6 @@ entry:
 define void @test_vsseg7_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -427,7 +401,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i6
 define void @test_vsseg8_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -446,7 +419,6 @@ entry:
 define void @test_vsseg8_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -468,7 +440,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i3
 define void @test_vsseg2_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -481,7 +452,6 @@ entry:
 define void @test_vsseg2_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -497,7 +467,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i3
 define void @test_vsseg3_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -511,7 +480,6 @@ entry:
 define void @test_vsseg3_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -528,7 +496,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i3
 define void @test_vsseg4_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -543,7 +510,6 @@ entry:
 define void @test_vsseg4_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -561,7 +527,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i3
 define void @test_vsseg5_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -577,7 +542,6 @@ entry:
 define void @test_vsseg5_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -596,7 +560,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i3
 define void @test_vsseg6_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -613,7 +576,6 @@ entry:
 define void @test_vsseg6_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -633,7 +595,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i3
 define void @test_vsseg7_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -651,7 +612,6 @@ entry:
 define void @test_vsseg7_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -672,7 +632,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i3
 define void @test_vsseg8_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -691,7 +650,6 @@ entry:
 define void @test_vsseg8_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -713,7 +671,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i1
 define void @test_vsseg2_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -726,7 +683,6 @@ entry:
 define void @test_vsseg2_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -742,7 +698,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i1
 define void @test_vsseg3_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -756,7 +711,6 @@ entry:
 define void @test_vsseg3_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -773,7 +727,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i1
 define void @test_vsseg4_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -788,7 +741,6 @@ entry:
 define void @test_vsseg4_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -806,7 +758,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,
 define void @test_vsseg2_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0)
@@ -819,7 +770,6 @@ entry:
 define void @test_vsseg2_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
@@ -835,7 +785,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,
 define void @test_vsseg3_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -849,7 +798,6 @@ entry:
 define void @test_vsseg3_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -866,7 +814,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,
 define void @test_vsseg4_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -881,7 +828,6 @@ entry:
 define void @test_vsseg4_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -899,7 +845,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,
 define void @test_vsseg5_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -915,7 +860,6 @@ entry:
 define void @test_vsseg5_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -934,7 +878,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,
 define void @test_vsseg6_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -951,7 +894,6 @@ entry:
 define void @test_vsseg6_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -971,7 +913,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,
 define void @test_vsseg7_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -989,7 +930,6 @@ entry:
 define void @test_vsseg7_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1010,7 +950,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>,
 define void @test_vsseg8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1029,7 +968,6 @@ entry:
 define void @test_vsseg8_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1051,7 +989,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i1
 define void @test_vsseg2_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -1064,7 +1001,6 @@ entry:
 define void @test_vsseg2_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -1080,7 +1016,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i1
 define void @test_vsseg3_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1094,7 +1029,6 @@ entry:
 define void @test_vsseg3_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -1111,7 +1045,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i1
 define void @test_vsseg4_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1126,7 +1059,6 @@ entry:
 define void @test_vsseg4_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1144,7 +1076,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i1
 define void @test_vsseg5_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1160,7 +1091,6 @@ entry:
 define void @test_vsseg5_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1179,7 +1109,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i1
 define void @test_vsseg6_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1196,7 +1125,6 @@ entry:
 define void @test_vsseg6_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1216,7 +1144,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i1
 define void @test_vsseg7_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1234,7 +1161,6 @@ entry:
 define void @test_vsseg7_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1255,7 +1181,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i1
 define void @test_vsseg8_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1274,7 +1199,6 @@ entry:
 define void @test_vsseg8_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1296,7 +1220,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i3
 define void @test_vsseg2_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -1309,7 +1232,6 @@ entry:
 define void @test_vsseg2_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -1325,7 +1247,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i3
 define void @test_vsseg3_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1339,7 +1260,6 @@ entry:
 define void @test_vsseg3_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1356,7 +1276,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i3
 define void @test_vsseg4_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1371,7 +1290,6 @@ entry:
 define void @test_vsseg4_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1389,7 +1307,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i3
 define void @test_vsseg5_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1405,7 +1322,6 @@ entry:
 define void @test_vsseg5_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1424,7 +1340,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i3
 define void @test_vsseg6_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1441,7 +1356,6 @@ entry:
 define void @test_vsseg6_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1461,7 +1375,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i3
 define void @test_vsseg7_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1479,7 +1392,6 @@ entry:
 define void @test_vsseg7_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1500,7 +1412,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i3
 define void @test_vsseg8_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1519,7 +1430,6 @@ entry:
 define void @test_vsseg8_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1541,7 +1451,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,
 define void @test_vsseg2_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0)
@@ -1554,7 +1463,6 @@ entry:
 define void @test_vsseg2_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
@@ -1570,7 +1478,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,
 define void @test_vsseg3_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1584,7 +1491,6 @@ entry:
 define void @test_vsseg3_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -1601,7 +1507,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,
 define void @test_vsseg4_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1616,7 +1521,6 @@ entry:
 define void @test_vsseg4_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1634,7 +1538,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,
 define void @test_vsseg5_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1650,7 +1553,6 @@ entry:
 define void @test_vsseg5_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1669,7 +1571,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,
 define void @test_vsseg6_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1686,7 +1587,6 @@ entry:
 define void @test_vsseg6_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1706,7 +1606,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,
 define void @test_vsseg7_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1724,7 +1623,6 @@ entry:
 define void @test_vsseg7_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1745,7 +1643,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>,
 define void @test_vsseg8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1764,7 +1661,6 @@ entry:
 define void @test_vsseg8_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1786,7 +1682,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i6
 define void @test_vsseg2_nxv4i64(<vscale x 4 x i64> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0)
@@ -1799,7 +1694,6 @@ entry:
 define void @test_vsseg2_mask_nxv4i64(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
@@ -1815,7 +1709,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1
 define void @test_vsseg2_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -1828,7 +1721,6 @@ entry:
 define void @test_vsseg2_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -1844,7 +1736,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1
 define void @test_vsseg3_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1858,7 +1749,6 @@ entry:
 define void @test_vsseg3_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1875,7 +1765,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1
 define void @test_vsseg4_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1890,7 +1779,6 @@ entry:
 define void @test_vsseg4_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1908,7 +1796,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1
 define void @test_vsseg5_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1924,7 +1811,6 @@ entry:
 define void @test_vsseg5_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1943,7 +1829,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1
 define void @test_vsseg6_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1960,7 +1845,6 @@ entry:
 define void @test_vsseg6_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1980,7 +1864,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1
 define void @test_vsseg7_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1998,7 +1881,6 @@ entry:
 define void @test_vsseg7_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2019,7 +1901,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i1
 define void @test_vsseg8_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2038,7 +1919,6 @@ entry:
 define void @test_vsseg8_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2060,7 +1940,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,
 define void @test_vsseg2_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0)
@@ -2073,7 +1952,6 @@ entry:
 define void @test_vsseg2_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
@@ -2089,7 +1967,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,
 define void @test_vsseg3_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -2103,7 +1980,6 @@ entry:
 define void @test_vsseg3_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -2120,7 +1996,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,
 define void @test_vsseg4_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2135,7 +2010,6 @@ entry:
 define void @test_vsseg4_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2153,7 +2027,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,
 define void @test_vsseg5_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2169,7 +2042,6 @@ entry:
 define void @test_vsseg5_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2188,7 +2060,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,
 define void @test_vsseg6_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2205,7 +2076,6 @@ entry:
 define void @test_vsseg6_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2225,7 +2095,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,
 define void @test_vsseg7_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2243,7 +2112,6 @@ entry:
 define void @test_vsseg7_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2264,7 +2132,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>,
 define void @test_vsseg8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2283,7 +2150,6 @@ entry:
 define void @test_vsseg8_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2305,7 +2171,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,
 define void @test_vsseg2_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0)
@@ -2318,7 +2183,6 @@ entry:
 define void @test_vsseg2_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
@@ -2334,7 +2198,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,
 define void @test_vsseg3_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -2348,7 +2211,6 @@ entry:
 define void @test_vsseg3_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -2365,7 +2227,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,
 define void @test_vsseg4_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2380,7 +2241,6 @@ entry:
 define void @test_vsseg4_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2398,7 +2258,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,
 define void @test_vsseg5_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2414,7 +2273,6 @@ entry:
 define void @test_vsseg5_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2433,7 +2291,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,
 define void @test_vsseg6_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2450,7 +2307,6 @@ entry:
 define void @test_vsseg6_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2470,7 +2326,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,
 define void @test_vsseg7_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2488,7 +2343,6 @@ entry:
 define void @test_vsseg7_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2509,7 +2363,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>,
 define void @test_vsseg8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2528,7 +2381,6 @@ entry:
 define void @test_vsseg8_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2550,7 +2402,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i3
 define void @test_vsseg2_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -2563,7 +2414,6 @@ entry:
 define void @test_vsseg2_mask_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -2579,7 +2429,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x i
 define void @test_vsseg2_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0)
@@ -2592,7 +2441,6 @@ entry:
 define void @test_vsseg2_mask_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e8.v v8, (a0), v0.t
@@ -2608,7 +2456,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i1
 define void @test_vsseg2_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -2621,7 +2468,6 @@ entry:
 define void @test_vsseg2_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -2637,7 +2483,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i1
 define void @test_vsseg3_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -2651,7 +2496,6 @@ entry:
 define void @test_vsseg3_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -2668,7 +2512,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i1
 define void @test_vsseg4_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2683,7 +2526,6 @@ entry:
 define void @test_vsseg4_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2701,7 +2543,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i1
 define void @test_vsseg5_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2717,7 +2558,6 @@ entry:
 define void @test_vsseg5_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2736,7 +2576,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i1
 define void @test_vsseg6_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2753,7 +2592,6 @@ entry:
 define void @test_vsseg6_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2773,7 +2611,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i1
 define void @test_vsseg7_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2791,7 +2628,6 @@ entry:
 define void @test_vsseg7_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2812,7 +2648,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i1
 define void @test_vsseg8_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2831,7 +2666,6 @@ entry:
 define void @test_vsseg8_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2853,7 +2687,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i6
 define void @test_vsseg2_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0)
@@ -2866,7 +2699,6 @@ entry:
 define void @test_vsseg2_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
@@ -2882,7 +2714,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i6
 define void @test_vsseg3_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -2896,7 +2727,6 @@ entry:
 define void @test_vsseg3_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -2913,7 +2743,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i6
 define void @test_vsseg4_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -2928,7 +2757,6 @@ entry:
 define void @test_vsseg4_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -2946,7 +2774,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16
 define void @test_vsseg2_nxv16f16(<vscale x 16 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -2959,7 +2786,6 @@ entry:
 define void @test_vsseg2_mask_nxv16f16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -2975,7 +2801,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4 x
 define void @test_vsseg2_nxv4f64(<vscale x 4 x double> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0)
@@ -2988,7 +2813,6 @@ entry:
 define void @test_vsseg2_mask_nxv4f64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
@@ -3004,7 +2828,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x
 define void @test_vsseg2_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0)
@@ -3017,7 +2840,6 @@ entry:
 define void @test_vsseg2_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
@@ -3033,7 +2855,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x
 define void @test_vsseg3_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -3047,7 +2868,6 @@ entry:
 define void @test_vsseg3_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -3064,7 +2884,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x
 define void @test_vsseg4_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3079,7 +2898,6 @@ entry:
 define void @test_vsseg4_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3097,7 +2915,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x
 define void @test_vsseg5_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3113,7 +2930,6 @@ entry:
 define void @test_vsseg5_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3132,7 +2948,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x
 define void @test_vsseg6_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3149,7 +2964,6 @@ entry:
 define void @test_vsseg6_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3169,7 +2983,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x
 define void @test_vsseg7_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3187,7 +3000,6 @@ entry:
 define void @test_vsseg7_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3208,7 +3020,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1 x
 define void @test_vsseg8_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3227,7 +3038,6 @@ entry:
 define void @test_vsseg8_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3249,7 +3059,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vsseg2_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -3262,7 +3071,6 @@ entry:
 define void @test_vsseg2_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -3278,7 +3086,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vsseg3_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -3292,7 +3099,6 @@ entry:
 define void @test_vsseg3_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -3309,7 +3115,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vsseg4_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3324,7 +3129,6 @@ entry:
 define void @test_vsseg4_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3342,7 +3146,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vsseg5_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3358,7 +3161,6 @@ entry:
 define void @test_vsseg5_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3377,7 +3179,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vsseg6_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3394,7 +3195,6 @@ entry:
 define void @test_vsseg6_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3414,7 +3214,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vsseg7_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3432,7 +3231,6 @@ entry:
 define void @test_vsseg7_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3453,7 +3251,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vsseg8_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3472,7 +3269,6 @@ entry:
 define void @test_vsseg8_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3494,7 +3290,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x h
 define void @test_vsseg2_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -3507,7 +3302,6 @@ entry:
 define void @test_vsseg2_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -3523,7 +3317,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x h
 define void @test_vsseg3_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -3537,7 +3330,6 @@ entry:
 define void @test_vsseg3_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -3554,7 +3346,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x h
 define void @test_vsseg4_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3569,7 +3360,6 @@ entry:
 define void @test_vsseg4_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3587,7 +3377,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x h
 define void @test_vsseg5_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3603,7 +3392,6 @@ entry:
 define void @test_vsseg5_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3622,7 +3410,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x h
 define void @test_vsseg6_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3639,7 +3426,6 @@ entry:
 define void @test_vsseg6_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3659,7 +3445,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x h
 define void @test_vsseg7_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3677,7 +3462,6 @@ entry:
 define void @test_vsseg7_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3698,7 +3482,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x h
 define void @test_vsseg8_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3717,7 +3500,6 @@ entry:
 define void @test_vsseg8_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3739,7 +3521,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vsseg2_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -3752,7 +3533,6 @@ entry:
 define void @test_vsseg2_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -3768,7 +3548,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vsseg3_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -3782,7 +3561,6 @@ entry:
 define void @test_vsseg3_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -3799,7 +3577,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vsseg4_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3814,7 +3591,6 @@ entry:
 define void @test_vsseg4_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3832,7 +3608,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vsseg5_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3848,7 +3623,6 @@ entry:
 define void @test_vsseg5_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3867,7 +3641,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vsseg6_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3884,7 +3657,6 @@ entry:
 define void @test_vsseg6_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3904,7 +3676,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vsseg7_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3922,7 +3693,6 @@ entry:
 define void @test_vsseg7_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3943,7 +3713,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vsseg8_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3962,7 +3731,6 @@ entry:
 define void @test_vsseg8_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3984,7 +3752,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x h
 define void @test_vsseg2_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -3997,7 +3764,6 @@ entry:
 define void @test_vsseg2_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -4013,7 +3779,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x h
 define void @test_vsseg3_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -4027,7 +3792,6 @@ entry:
 define void @test_vsseg3_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -4044,7 +3808,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x h
 define void @test_vsseg4_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -4059,7 +3822,6 @@ entry:
 define void @test_vsseg4_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -4077,7 +3839,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x
 define void @test_vsseg2_nxv8f32(<vscale x 8 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -4090,7 +3851,6 @@ entry:
 define void @test_vsseg2_mask_nxv8f32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -4106,7 +3866,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x
 define void @test_vsseg2_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0)
@@ -4119,7 +3878,6 @@ entry:
 define void @test_vsseg2_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e64.v v8, (a0), v0.t
@@ -4135,7 +3893,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x
 define void @test_vsseg3_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -4149,7 +3906,6 @@ entry:
 define void @test_vsseg3_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -4166,7 +3922,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2 x
 define void @test_vsseg4_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -4181,7 +3936,6 @@ entry:
 define void @test_vsseg4_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -4199,7 +3953,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x h
 define void @test_vsseg2_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -4212,7 +3965,6 @@ entry:
 define void @test_vsseg2_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -4228,7 +3980,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x h
 define void @test_vsseg3_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -4242,7 +3993,6 @@ entry:
 define void @test_vsseg3_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -4259,7 +4009,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x h
 define void @test_vsseg4_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4274,7 +4023,6 @@ entry:
 define void @test_vsseg4_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4292,7 +4040,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x h
 define void @test_vsseg5_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4308,7 +4055,6 @@ entry:
 define void @test_vsseg5_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4327,7 +4073,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x h
 define void @test_vsseg6_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4344,7 +4089,6 @@ entry:
 define void @test_vsseg6_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4364,7 +4108,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x h
 define void @test_vsseg7_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4382,7 +4125,6 @@ entry:
 define void @test_vsseg7_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4403,7 +4145,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x h
 define void @test_vsseg8_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4422,7 +4163,6 @@ entry:
 define void @test_vsseg8_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4444,7 +4184,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x h
 define void @test_vsseg2_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -4457,7 +4196,6 @@ entry:
 define void @test_vsseg2_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -4473,7 +4211,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x h
 define void @test_vsseg3_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -4487,7 +4224,6 @@ entry:
 define void @test_vsseg3_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -4504,7 +4240,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x h
 define void @test_vsseg4_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4519,7 +4254,6 @@ entry:
 define void @test_vsseg4_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4537,7 +4271,6 @@ declare void @llvm.riscv.vsseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x h
 define void @test_vsseg5_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4553,7 +4286,6 @@ entry:
 define void @test_vsseg5_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg5_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4572,7 +4304,6 @@ declare void @llvm.riscv.vsseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x h
 define void @test_vsseg6_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4589,7 +4320,6 @@ entry:
 define void @test_vsseg6_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg6_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4609,7 +4339,6 @@ declare void @llvm.riscv.vsseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x h
 define void @test_vsseg7_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4627,7 +4356,6 @@ entry:
 define void @test_vsseg7_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg7_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4648,7 +4376,6 @@ declare void @llvm.riscv.vsseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x h
 define void @test_vsseg8_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4667,7 +4394,6 @@ entry:
 define void @test_vsseg8_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg8_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4689,7 +4415,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x
 define void @test_vsseg2_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0)
@@ -4702,7 +4427,6 @@ entry:
 define void @test_vsseg2_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsseg2e32.v v8, (a0), v0.t
@@ -4718,7 +4442,6 @@ declare void @llvm.riscv.vsseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x
 define void @test_vsseg3_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -4732,7 +4455,6 @@ entry:
 define void @test_vsseg3_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg3_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -4749,7 +4471,6 @@ declare void @llvm.riscv.vsseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x
 define void @test_vsseg4_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -4764,7 +4485,6 @@ entry:
 define void @test_vsseg4_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsseg4_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll
index 30b407124b3a1..01552a72cc930 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv32.ll
@@ -8,7 +8,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16
 define void @test_vssseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -21,7 +20,6 @@ entry:
 define void @test_vssseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -37,7 +35,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>
 define void @test_vssseg2_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
@@ -50,7 +47,6 @@ entry:
 define void @test_vssseg2_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
@@ -66,7 +62,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>
 define void @test_vssseg3_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
@@ -80,7 +75,6 @@ entry:
 define void @test_vssseg3_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
@@ -97,7 +91,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>
 define void @test_vssseg4_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -112,7 +105,6 @@ entry:
 define void @test_vssseg4_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -130,7 +122,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>
 define void @test_vssseg5_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -146,7 +137,6 @@ entry:
 define void @test_vssseg5_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -165,7 +155,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>
 define void @test_vssseg6_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -182,7 +171,6 @@ entry:
 define void @test_vssseg6_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -202,7 +190,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>
 define void @test_vssseg7_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -220,7 +207,6 @@ entry:
 define void @test_vssseg7_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -241,7 +227,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>
 define void @test_vssseg8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -260,7 +245,6 @@ entry:
 define void @test_vssseg8_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -282,7 +266,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x
 define void @test_vssseg2_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
@@ -295,7 +278,6 @@ entry:
 define void @test_vssseg2_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
@@ -311,7 +293,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x
 define void @test_vssseg3_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
@@ -325,7 +306,6 @@ entry:
 define void @test_vssseg3_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
@@ -342,7 +322,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x
 define void @test_vssseg4_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -357,7 +336,6 @@ entry:
 define void @test_vssseg4_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -375,7 +353,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i
 define void @test_vssseg2_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -388,7 +365,6 @@ entry:
 define void @test_vssseg2_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -404,7 +380,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i
 define void @test_vssseg3_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -418,7 +393,6 @@ entry:
 define void @test_vssseg3_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -435,7 +409,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i
 define void @test_vssseg4_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -450,7 +423,6 @@ entry:
 define void @test_vssseg4_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -468,7 +440,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i
 define void @test_vssseg5_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -484,7 +455,6 @@ entry:
 define void @test_vssseg5_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -503,7 +473,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i
 define void @test_vssseg6_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -520,7 +489,6 @@ entry:
 define void @test_vssseg6_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -540,7 +508,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i
 define void @test_vssseg7_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -558,7 +525,6 @@ entry:
 define void @test_vssseg7_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -579,7 +545,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i
 define void @test_vssseg8_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -598,7 +563,6 @@ entry:
 define void @test_vssseg8_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -620,7 +584,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i
 define void @test_vssseg2_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -633,7 +596,6 @@ entry:
 define void @test_vssseg2_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -649,7 +611,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i
 define void @test_vssseg3_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
@@ -663,7 +624,6 @@ entry:
 define void @test_vssseg3_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
@@ -680,7 +640,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i
 define void @test_vssseg4_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -695,7 +654,6 @@ entry:
 define void @test_vssseg4_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -713,7 +671,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i
 define void @test_vssseg5_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -729,7 +686,6 @@ entry:
 define void @test_vssseg5_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -748,7 +704,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i
 define void @test_vssseg6_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -765,7 +720,6 @@ entry:
 define void @test_vssseg6_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -785,7 +739,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i
 define void @test_vssseg7_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -803,7 +756,6 @@ entry:
 define void @test_vssseg7_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -824,7 +776,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i
 define void @test_vssseg8_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -843,7 +794,6 @@ entry:
 define void @test_vssseg8_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -865,7 +815,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i
 define void @test_vssseg2_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -878,7 +827,6 @@ entry:
 define void @test_vssseg2_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -894,7 +842,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i
 define void @test_vssseg3_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
@@ -908,7 +855,6 @@ entry:
 define void @test_vssseg3_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
@@ -925,7 +871,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i
 define void @test_vssseg4_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -940,7 +885,6 @@ entry:
 define void @test_vssseg4_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -958,7 +902,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i
 define void @test_vssseg5_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -974,7 +917,6 @@ entry:
 define void @test_vssseg5_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -993,7 +935,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i
 define void @test_vssseg6_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1010,7 +951,6 @@ entry:
 define void @test_vssseg6_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1030,7 +970,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i
 define void @test_vssseg7_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1048,7 +987,6 @@ entry:
 define void @test_vssseg7_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1069,7 +1007,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i
 define void @test_vssseg8_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1088,7 +1025,6 @@ entry:
 define void @test_vssseg8_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1110,7 +1046,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i
 define void @test_vssseg2_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -1123,7 +1058,6 @@ entry:
 define void @test_vssseg2_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -1139,7 +1073,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i
 define void @test_vssseg3_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
@@ -1153,7 +1086,6 @@ entry:
 define void @test_vssseg3_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
@@ -1170,7 +1102,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i
 define void @test_vssseg4_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -1185,7 +1116,6 @@ entry:
 define void @test_vssseg4_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -1203,7 +1133,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>
 define void @test_vssseg2_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
@@ -1216,7 +1145,6 @@ entry:
 define void @test_vssseg2_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
@@ -1232,7 +1160,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>
 define void @test_vssseg3_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
@@ -1246,7 +1173,6 @@ entry:
 define void @test_vssseg3_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
@@ -1263,7 +1189,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>
 define void @test_vssseg4_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1278,7 +1203,6 @@ entry:
 define void @test_vssseg4_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1296,7 +1220,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>
 define void @test_vssseg5_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1312,7 +1235,6 @@ entry:
 define void @test_vssseg5_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1331,7 +1253,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>
 define void @test_vssseg6_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1348,7 +1269,6 @@ entry:
 define void @test_vssseg6_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1368,7 +1288,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>
 define void @test_vssseg7_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1386,7 +1305,6 @@ entry:
 define void @test_vssseg7_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1407,7 +1325,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>
 define void @test_vssseg8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1426,7 +1343,6 @@ entry:
 define void @test_vssseg8_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1448,7 +1364,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i
 define void @test_vssseg2_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -1461,7 +1376,6 @@ entry:
 define void @test_vssseg2_mask_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -1477,7 +1391,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>
 define void @test_vssseg2_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
@@ -1490,7 +1403,6 @@ entry:
 define void @test_vssseg2_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
@@ -1506,7 +1418,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>
 define void @test_vssseg3_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
@@ -1520,7 +1431,6 @@ entry:
 define void @test_vssseg3_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
@@ -1537,7 +1447,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>
 define void @test_vssseg4_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1552,7 +1461,6 @@ entry:
 define void @test_vssseg4_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1570,7 +1478,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>
 define void @test_vssseg5_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1586,7 +1493,6 @@ entry:
 define void @test_vssseg5_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1605,7 +1511,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>
 define void @test_vssseg6_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1622,7 +1527,6 @@ entry:
 define void @test_vssseg6_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1642,7 +1546,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>
 define void @test_vssseg7_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1660,7 +1563,6 @@ entry:
 define void @test_vssseg7_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1681,7 +1583,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>
 define void @test_vssseg8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1700,7 +1601,6 @@ entry:
 define void @test_vssseg8_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1722,7 +1622,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i
 define void @test_vssseg2_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -1735,7 +1634,6 @@ entry:
 define void @test_vssseg2_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -1751,7 +1649,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i
 define void @test_vssseg3_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
@@ -1765,7 +1662,6 @@ entry:
 define void @test_vssseg3_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
@@ -1782,7 +1678,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i
 define void @test_vssseg4_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1797,7 +1692,6 @@ entry:
 define void @test_vssseg4_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1815,7 +1709,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i
 define void @test_vssseg5_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1831,7 +1724,6 @@ entry:
 define void @test_vssseg5_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1850,7 +1742,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i
 define void @test_vssseg6_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1867,7 +1758,6 @@ entry:
 define void @test_vssseg6_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1887,7 +1777,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i
 define void @test_vssseg7_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1905,7 +1794,6 @@ entry:
 define void @test_vssseg7_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1926,7 +1814,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i
 define void @test_vssseg8_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1945,7 +1832,6 @@ entry:
 define void @test_vssseg8_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1967,7 +1853,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x
 define void @test_vssseg2_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
@@ -1980,7 +1865,6 @@ entry:
 define void @test_vssseg2_mask_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i32 %offset, <vscale x 32 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
@@ -1996,7 +1880,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>
 define void @test_vssseg2_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
@@ -2009,7 +1892,6 @@ entry:
 define void @test_vssseg2_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
@@ -2025,7 +1907,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>
 define void @test_vssseg3_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
@@ -2039,7 +1920,6 @@ entry:
 define void @test_vssseg3_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
@@ -2056,7 +1936,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>
 define void @test_vssseg4_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2071,7 +1950,6 @@ entry:
 define void @test_vssseg4_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2089,7 +1967,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>
 define void @test_vssseg5_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2105,7 +1982,6 @@ entry:
 define void @test_vssseg5_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2124,7 +2000,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>
 define void @test_vssseg6_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2141,7 +2016,6 @@ entry:
 define void @test_vssseg6_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2161,7 +2035,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>
 define void @test_vssseg7_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2179,7 +2052,6 @@ entry:
 define void @test_vssseg7_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2200,7 +2072,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>
 define void @test_vssseg8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2219,7 +2090,6 @@ entry:
 define void @test_vssseg8_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2241,7 +2111,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i
 define void @test_vssseg2_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -2254,7 +2123,6 @@ entry:
 define void @test_vssseg2_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -2270,7 +2138,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i
 define void @test_vssseg3_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
@@ -2284,7 +2151,6 @@ entry:
 define void @test_vssseg3_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
@@ -2301,7 +2167,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i
 define void @test_vssseg4_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2316,7 +2181,6 @@ entry:
 define void @test_vssseg4_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2334,7 +2198,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i
 define void @test_vssseg5_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2350,7 +2213,6 @@ entry:
 define void @test_vssseg5_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2369,7 +2231,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i
 define void @test_vssseg6_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2386,7 +2247,6 @@ entry:
 define void @test_vssseg6_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2406,7 +2266,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i
 define void @test_vssseg7_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2424,7 +2283,6 @@ entry:
 define void @test_vssseg7_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2445,7 +2303,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i
 define void @test_vssseg8_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2464,7 +2321,6 @@ entry:
 define void @test_vssseg8_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2486,7 +2342,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i
 define void @test_vssseg2_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -2499,7 +2354,6 @@ entry:
 define void @test_vssseg2_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -2515,7 +2369,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i
 define void @test_vssseg3_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
@@ -2529,7 +2382,6 @@ entry:
 define void @test_vssseg3_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
@@ -2546,7 +2398,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i
 define void @test_vssseg4_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -2561,7 +2412,6 @@ entry:
 define void @test_vssseg4_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -2579,7 +2429,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16
 define void @test_vssseg2_nxv16f16(<vscale x 16 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -2592,7 +2441,6 @@ entry:
 define void @test_vssseg2_mask_nxv16f16(<vscale x 16 x half> %val, ptr %base, i32 %offset, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -2608,7 +2456,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4
 define void @test_vssseg2_nxv4f64(<vscale x 4 x double> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1
@@ -2621,7 +2468,6 @@ entry:
 define void @test_vssseg2_mask_nxv4f64(<vscale x 4 x double> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1, v0.t
@@ -2637,7 +2483,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1
 define void @test_vssseg2_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1
@@ -2650,7 +2495,6 @@ entry:
 define void @test_vssseg2_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1, v0.t
@@ -2666,7 +2510,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1
 define void @test_vssseg3_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
@@ -2680,7 +2523,6 @@ entry:
 define void @test_vssseg3_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
@@ -2697,7 +2539,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1
 define void @test_vssseg4_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2712,7 +2553,6 @@ entry:
 define void @test_vssseg4_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2730,7 +2570,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1
 define void @test_vssseg5_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2746,7 +2585,6 @@ entry:
 define void @test_vssseg5_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2765,7 +2603,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1
 define void @test_vssseg6_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2782,7 +2619,6 @@ entry:
 define void @test_vssseg6_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2802,7 +2638,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1
 define void @test_vssseg7_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2820,7 +2655,6 @@ entry:
 define void @test_vssseg7_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2841,7 +2675,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1
 define void @test_vssseg8_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2860,7 +2693,6 @@ entry:
 define void @test_vssseg8_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2882,7 +2714,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vssseg2_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -2895,7 +2726,6 @@ entry:
 define void @test_vssseg2_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -2911,7 +2741,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vssseg3_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -2925,7 +2754,6 @@ entry:
 define void @test_vssseg3_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -2942,7 +2770,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vssseg4_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2957,7 +2784,6 @@ entry:
 define void @test_vssseg4_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2975,7 +2801,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vssseg5_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2991,7 +2816,6 @@ entry:
 define void @test_vssseg5_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3010,7 +2834,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vssseg6_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3027,7 +2850,6 @@ entry:
 define void @test_vssseg6_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3047,7 +2869,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vssseg7_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3065,7 +2886,6 @@ entry:
 define void @test_vssseg7_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3086,7 +2906,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vssseg8_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3105,7 +2924,6 @@ entry:
 define void @test_vssseg8_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3127,7 +2945,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x
 define void @test_vssseg2_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -3140,7 +2957,6 @@ entry:
 define void @test_vssseg2_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -3156,7 +2972,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x
 define void @test_vssseg3_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
@@ -3170,7 +2985,6 @@ entry:
 define void @test_vssseg3_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
@@ -3187,7 +3001,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x
 define void @test_vssseg4_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3202,7 +3015,6 @@ entry:
 define void @test_vssseg4_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3220,7 +3032,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x
 define void @test_vssseg5_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3236,7 +3047,6 @@ entry:
 define void @test_vssseg5_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3255,7 +3065,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x
 define void @test_vssseg6_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3272,7 +3081,6 @@ entry:
 define void @test_vssseg6_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3292,7 +3100,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x
 define void @test_vssseg7_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3310,7 +3117,6 @@ entry:
 define void @test_vssseg7_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3331,7 +3137,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x
 define void @test_vssseg8_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3350,7 +3155,6 @@ entry:
 define void @test_vssseg8_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3372,7 +3176,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vssseg2_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -3385,7 +3188,6 @@ entry:
 define void @test_vssseg2_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -3401,7 +3203,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vssseg3_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
@@ -3415,7 +3216,6 @@ entry:
 define void @test_vssseg3_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
@@ -3432,7 +3232,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vssseg4_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3447,7 +3246,6 @@ entry:
 define void @test_vssseg4_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3465,7 +3263,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vssseg5_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3481,7 +3278,6 @@ entry:
 define void @test_vssseg5_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3500,7 +3296,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vssseg6_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3517,7 +3312,6 @@ entry:
 define void @test_vssseg6_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3537,7 +3331,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vssseg7_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3555,7 +3348,6 @@ entry:
 define void @test_vssseg7_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3576,7 +3368,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vssseg8_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3595,7 +3386,6 @@ entry:
 define void @test_vssseg8_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i32 %offset, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3617,7 +3407,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x
 define void @test_vssseg2_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -3630,7 +3419,6 @@ entry:
 define void @test_vssseg2_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -3646,7 +3434,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x
 define void @test_vssseg3_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
@@ -3660,7 +3447,6 @@ entry:
 define void @test_vssseg3_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
@@ -3677,7 +3463,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x
 define void @test_vssseg4_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -3692,7 +3477,6 @@ entry:
 define void @test_vssseg4_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -3710,7 +3494,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x
 define void @test_vssseg2_nxv8f32(<vscale x 8 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -3723,7 +3506,6 @@ entry:
 define void @test_vssseg2_mask_nxv8f32(<vscale x 8 x float> %val, ptr %base, i32 %offset, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -3739,7 +3521,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2
 define void @test_vssseg2_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1
@@ -3752,7 +3533,6 @@ entry:
 define void @test_vssseg2_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1, v0.t
@@ -3768,7 +3548,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2
 define void @test_vssseg3_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
@@ -3782,7 +3561,6 @@ entry:
 define void @test_vssseg3_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
@@ -3799,7 +3577,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2
 define void @test_vssseg4_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -3814,7 +3591,6 @@ entry:
 define void @test_vssseg4_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -3832,7 +3608,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x
 define void @test_vssseg2_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -3845,7 +3620,6 @@ entry:
 define void @test_vssseg2_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -3861,7 +3635,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x
 define void @test_vssseg3_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
@@ -3875,7 +3648,6 @@ entry:
 define void @test_vssseg3_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
@@ -3892,7 +3664,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x
 define void @test_vssseg4_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3907,7 +3678,6 @@ entry:
 define void @test_vssseg4_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3925,7 +3695,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x
 define void @test_vssseg5_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3941,7 +3710,6 @@ entry:
 define void @test_vssseg5_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3960,7 +3728,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x
 define void @test_vssseg6_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3977,7 +3744,6 @@ entry:
 define void @test_vssseg6_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3997,7 +3763,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x
 define void @test_vssseg7_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4015,7 +3780,6 @@ entry:
 define void @test_vssseg7_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4036,7 +3800,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x
 define void @test_vssseg8_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4055,7 +3818,6 @@ entry:
 define void @test_vssseg8_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4077,7 +3839,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x
 define void @test_vssseg2_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -4090,7 +3851,6 @@ entry:
 define void @test_vssseg2_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -4106,7 +3866,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x
 define void @test_vssseg3_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
@@ -4120,7 +3879,6 @@ entry:
 define void @test_vssseg3_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
@@ -4137,7 +3895,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x
 define void @test_vssseg4_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4152,7 +3909,6 @@ entry:
 define void @test_vssseg4_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4170,7 +3926,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x
 define void @test_vssseg5_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4186,7 +3941,6 @@ entry:
 define void @test_vssseg5_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4205,7 +3959,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x
 define void @test_vssseg6_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4222,7 +3975,6 @@ entry:
 define void @test_vssseg6_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4242,7 +3994,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x
 define void @test_vssseg7_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4260,7 +4011,6 @@ entry:
 define void @test_vssseg7_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4281,7 +4031,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x
 define void @test_vssseg8_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4300,7 +4049,6 @@ entry:
 define void @test_vssseg8_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i32 %offset, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4322,7 +4070,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x
 define void @test_vssseg2_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -4335,7 +4082,6 @@ entry:
 define void @test_vssseg2_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -4351,7 +4097,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x
 define void @test_vssseg3_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
@@ -4365,7 +4110,6 @@ entry:
 define void @test_vssseg3_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
@@ -4382,7 +4126,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x
 define void @test_vssseg4_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %offset, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -4397,7 +4140,6 @@ entry:
 define void @test_vssseg4_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i32 %offset, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8

diff --git a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll
index 9778447cd848f..6e067e6358239 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vssseg-rv64.ll
@@ -8,7 +8,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16
 define void @test_vssseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -21,7 +20,6 @@ entry:
 define void @test_vssseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -37,7 +35,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i
 define void @test_vssseg2_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -50,7 +47,6 @@ entry:
 define void @test_vssseg2_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -66,7 +62,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i
 define void @test_vssseg3_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
@@ -80,7 +75,6 @@ entry:
 define void @test_vssseg3_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
@@ -97,7 +91,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv4i32(<vscale x 4 x i32>,<vscale x 4 x i
 define void @test_vssseg4_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -112,7 +105,6 @@ entry:
 define void @test_vssseg4_mask_nxv4i32(<vscale x 4 x i32> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -130,7 +122,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x
 define void @test_vssseg2_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
@@ -143,7 +134,6 @@ entry:
 define void @test_vssseg2_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
@@ -159,7 +149,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x
 define void @test_vssseg3_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
@@ -173,7 +162,6 @@ entry:
 define void @test_vssseg3_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, ma
@@ -190,7 +178,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv16i8(<vscale x 16 x i8>,<vscale x 16 x
 define void @test_vssseg4_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -205,7 +192,6 @@ entry:
 define void @test_vssseg4_mask_nxv16i8(<vscale x 16 x i8> %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -223,7 +209,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i
 define void @test_vssseg2_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1
@@ -236,7 +221,6 @@ entry:
 define void @test_vssseg2_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1, v0.t
@@ -252,7 +236,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i
 define void @test_vssseg3_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
@@ -266,7 +249,6 @@ entry:
 define void @test_vssseg3_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
@@ -283,7 +265,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i
 define void @test_vssseg4_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -298,7 +279,6 @@ entry:
 define void @test_vssseg4_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -316,7 +296,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i
 define void @test_vssseg5_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -332,7 +311,6 @@ entry:
 define void @test_vssseg5_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -351,7 +329,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i
 define void @test_vssseg6_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -368,7 +345,6 @@ entry:
 define void @test_vssseg6_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -388,7 +364,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i
 define void @test_vssseg7_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -406,7 +381,6 @@ entry:
 define void @test_vssseg7_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -427,7 +401,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv1i64(<vscale x 1 x i64>,<vscale x 1 x i
 define void @test_vssseg8_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -446,7 +419,6 @@ entry:
 define void @test_vssseg8_mask_nxv1i64(<vscale x 1 x i64> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -468,7 +440,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i
 define void @test_vssseg2_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -481,7 +452,6 @@ entry:
 define void @test_vssseg2_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -497,7 +467,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i
 define void @test_vssseg3_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
@@ -511,7 +480,6 @@ entry:
 define void @test_vssseg3_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
@@ -528,7 +496,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i
 define void @test_vssseg4_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -543,7 +510,6 @@ entry:
 define void @test_vssseg4_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -561,7 +527,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i
 define void @test_vssseg5_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -577,7 +542,6 @@ entry:
 define void @test_vssseg5_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -596,7 +560,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i
 define void @test_vssseg6_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -613,7 +576,6 @@ entry:
 define void @test_vssseg6_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -633,7 +595,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i
 define void @test_vssseg7_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -651,7 +612,6 @@ entry:
 define void @test_vssseg7_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -672,7 +632,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv1i32(<vscale x 1 x i32>,<vscale x 1 x i
 define void @test_vssseg8_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -691,7 +650,6 @@ entry:
 define void @test_vssseg8_mask_nxv1i32(<vscale x 1 x i32> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -713,7 +671,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i
 define void @test_vssseg2_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -726,7 +683,6 @@ entry:
 define void @test_vssseg2_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -742,7 +698,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i
 define void @test_vssseg3_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
@@ -756,7 +711,6 @@ entry:
 define void @test_vssseg3_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
@@ -773,7 +727,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv8i16(<vscale x 8 x i16>,<vscale x 8 x i
 define void @test_vssseg4_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -788,7 +741,6 @@ entry:
 define void @test_vssseg4_mask_nxv8i16(<vscale x 8 x i16> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -806,7 +758,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>
 define void @test_vssseg2_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
@@ -819,7 +770,6 @@ entry:
 define void @test_vssseg2_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
@@ -835,7 +785,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>
 define void @test_vssseg3_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
@@ -849,7 +798,6 @@ entry:
 define void @test_vssseg3_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf2, ta, ma
@@ -866,7 +814,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>
 define void @test_vssseg4_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -881,7 +828,6 @@ entry:
 define void @test_vssseg4_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -899,7 +845,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>
 define void @test_vssseg5_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -915,7 +860,6 @@ entry:
 define void @test_vssseg5_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -934,7 +878,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>
 define void @test_vssseg6_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -951,7 +894,6 @@ entry:
 define void @test_vssseg6_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -971,7 +913,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>
 define void @test_vssseg7_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -989,7 +930,6 @@ entry:
 define void @test_vssseg7_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1010,7 +950,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv4i8(<vscale x 4 x i8>,<vscale x 4 x i8>
 define void @test_vssseg8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1029,7 +968,6 @@ entry:
 define void @test_vssseg8_mask_nxv4i8(<vscale x 4 x i8> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1051,7 +989,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i
 define void @test_vssseg2_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -1064,7 +1001,6 @@ entry:
 define void @test_vssseg2_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -1080,7 +1016,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i
 define void @test_vssseg3_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
@@ -1094,7 +1029,6 @@ entry:
 define void @test_vssseg3_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
@@ -1111,7 +1045,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i
 define void @test_vssseg4_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1126,7 +1059,6 @@ entry:
 define void @test_vssseg4_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1144,7 +1076,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i
 define void @test_vssseg5_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1160,7 +1091,6 @@ entry:
 define void @test_vssseg5_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1179,7 +1109,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i
 define void @test_vssseg6_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1196,7 +1125,6 @@ entry:
 define void @test_vssseg6_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1216,7 +1144,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i
 define void @test_vssseg7_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1234,7 +1161,6 @@ entry:
 define void @test_vssseg7_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1255,7 +1181,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv1i16(<vscale x 1 x i16>,<vscale x 1 x i
 define void @test_vssseg8_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1274,7 +1199,6 @@ entry:
 define void @test_vssseg8_mask_nxv1i16(<vscale x 1 x i16> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1296,7 +1220,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i
 define void @test_vssseg2_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -1309,7 +1232,6 @@ entry:
 define void @test_vssseg2_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -1325,7 +1247,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i
 define void @test_vssseg3_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -1339,7 +1260,6 @@ entry:
 define void @test_vssseg3_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -1356,7 +1276,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i
 define void @test_vssseg4_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1371,7 +1290,6 @@ entry:
 define void @test_vssseg4_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1389,7 +1307,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i
 define void @test_vssseg5_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1405,7 +1322,6 @@ entry:
 define void @test_vssseg5_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1424,7 +1340,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i
 define void @test_vssseg6_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1441,7 +1356,6 @@ entry:
 define void @test_vssseg6_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1461,7 +1375,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i
 define void @test_vssseg7_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1479,7 +1392,6 @@ entry:
 define void @test_vssseg7_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1500,7 +1412,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv2i32(<vscale x 2 x i32>,<vscale x 2 x i
 define void @test_vssseg8_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1519,7 +1430,6 @@ entry:
 define void @test_vssseg8_mask_nxv2i32(<vscale x 2 x i32> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1541,7 +1451,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>
 define void @test_vssseg2_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
@@ -1554,7 +1463,6 @@ entry:
 define void @test_vssseg2_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
@@ -1570,7 +1478,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>
 define void @test_vssseg3_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
@@ -1584,7 +1491,6 @@ entry:
 define void @test_vssseg3_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m1, ta, ma
@@ -1601,7 +1507,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>
 define void @test_vssseg4_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1616,7 +1521,6 @@ entry:
 define void @test_vssseg4_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1634,7 +1538,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>
 define void @test_vssseg5_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1650,7 +1553,6 @@ entry:
 define void @test_vssseg5_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1669,7 +1571,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>
 define void @test_vssseg6_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1686,7 +1587,6 @@ entry:
 define void @test_vssseg6_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1706,7 +1606,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>
 define void @test_vssseg7_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1724,7 +1623,6 @@ entry:
 define void @test_vssseg7_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1745,7 +1643,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv8i8(<vscale x 8 x i8>,<vscale x 8 x i8>
 define void @test_vssseg8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1764,7 +1661,6 @@ entry:
 define void @test_vssseg8_mask_nxv8i8(<vscale x 8 x i8> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1786,7 +1682,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv4i64(<vscale x 4 x i64>,<vscale x 4 x i
 define void @test_vssseg2_nxv4i64(<vscale x 4 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1
@@ -1799,7 +1694,6 @@ entry:
 define void @test_vssseg2_mask_nxv4i64(<vscale x 4 x i64> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1, v0.t
@@ -1815,7 +1709,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i
 define void @test_vssseg2_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -1828,7 +1721,6 @@ entry:
 define void @test_vssseg2_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -1844,7 +1736,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i
 define void @test_vssseg3_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
@@ -1858,7 +1749,6 @@ entry:
 define void @test_vssseg3_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
@@ -1875,7 +1765,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i
 define void @test_vssseg4_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1890,7 +1779,6 @@ entry:
 define void @test_vssseg4_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1908,7 +1796,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i
 define void @test_vssseg5_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1924,7 +1811,6 @@ entry:
 define void @test_vssseg5_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1943,7 +1829,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i
 define void @test_vssseg6_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1960,7 +1845,6 @@ entry:
 define void @test_vssseg6_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1980,7 +1864,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i
 define void @test_vssseg7_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -1998,7 +1881,6 @@ entry:
 define void @test_vssseg7_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2019,7 +1901,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv4i16(<vscale x 4 x i16>,<vscale x 4 x i
 define void @test_vssseg8_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2038,7 +1919,6 @@ entry:
 define void @test_vssseg8_mask_nxv4i16(<vscale x 4 x i16> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2060,7 +1940,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>
 define void @test_vssseg2_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
@@ -2073,7 +1952,6 @@ entry:
 define void @test_vssseg2_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
@@ -2089,7 +1967,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>
 define void @test_vssseg3_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
@@ -2103,7 +1980,6 @@ entry:
 define void @test_vssseg3_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf8, ta, ma
@@ -2120,7 +1996,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>
 define void @test_vssseg4_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2135,7 +2010,6 @@ entry:
 define void @test_vssseg4_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2153,7 +2027,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>
 define void @test_vssseg5_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2169,7 +2042,6 @@ entry:
 define void @test_vssseg5_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2188,7 +2060,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>
 define void @test_vssseg6_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2205,7 +2076,6 @@ entry:
 define void @test_vssseg6_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2225,7 +2095,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>
 define void @test_vssseg7_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2243,7 +2112,6 @@ entry:
 define void @test_vssseg7_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2264,7 +2132,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv1i8(<vscale x 1 x i8>,<vscale x 1 x i8>
 define void @test_vssseg8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2283,7 +2150,6 @@ entry:
 define void @test_vssseg8_mask_nxv1i8(<vscale x 1 x i8> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2305,7 +2171,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>
 define void @test_vssseg2_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
@@ -2318,7 +2183,6 @@ entry:
 define void @test_vssseg2_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
@@ -2334,7 +2198,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>
 define void @test_vssseg3_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
@@ -2348,7 +2211,6 @@ entry:
 define void @test_vssseg3_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, mf4, ta, ma
@@ -2365,7 +2227,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>
 define void @test_vssseg4_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2380,7 +2241,6 @@ entry:
 define void @test_vssseg4_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2398,7 +2258,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>
 define void @test_vssseg5_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2414,7 +2273,6 @@ entry:
 define void @test_vssseg5_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2433,7 +2291,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>
 define void @test_vssseg6_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2450,7 +2307,6 @@ entry:
 define void @test_vssseg6_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2470,7 +2326,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>
 define void @test_vssseg7_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2488,7 +2343,6 @@ entry:
 define void @test_vssseg7_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2509,7 +2363,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv2i8(<vscale x 2 x i8>,<vscale x 2 x i8>
 define void @test_vssseg8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2528,7 +2381,6 @@ entry:
 define void @test_vssseg8_mask_nxv2i8(<vscale x 2 x i8> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2550,7 +2402,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv8i32(<vscale x 8 x i32>,<vscale x 8 x i
 define void @test_vssseg2_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -2563,7 +2414,6 @@ entry:
 define void @test_vssseg2_mask_nxv8i32(<vscale x 8 x i32> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -2579,7 +2429,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv32i8(<vscale x 32 x i8>,<vscale x 32 x
 define void @test_vssseg2_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1
@@ -2592,7 +2441,6 @@ entry:
 define void @test_vssseg2_mask_nxv32i8(<vscale x 32 x i8> %val, ptr %base, i64 %offset, <vscale x 32 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e8, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e8.v v8, (a0), a1, v0.t
@@ -2608,7 +2456,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i
 define void @test_vssseg2_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -2621,7 +2468,6 @@ entry:
 define void @test_vssseg2_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -2637,7 +2483,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i
 define void @test_vssseg3_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
@@ -2651,7 +2496,6 @@ entry:
 define void @test_vssseg3_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
@@ -2668,7 +2512,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i
 define void @test_vssseg4_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2683,7 +2526,6 @@ entry:
 define void @test_vssseg4_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2701,7 +2543,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i
 define void @test_vssseg5_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2717,7 +2558,6 @@ entry:
 define void @test_vssseg5_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2736,7 +2576,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i
 define void @test_vssseg6_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2753,7 +2592,6 @@ entry:
 define void @test_vssseg6_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2773,7 +2611,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i
 define void @test_vssseg7_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2791,7 +2628,6 @@ entry:
 define void @test_vssseg7_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2812,7 +2648,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv2i16(<vscale x 2 x i16>,<vscale x 2 x i
 define void @test_vssseg8_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2831,7 +2666,6 @@ entry:
 define void @test_vssseg8_mask_nxv2i16(<vscale x 2 x i16> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -2853,7 +2687,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i
 define void @test_vssseg2_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1
@@ -2866,7 +2699,6 @@ entry:
 define void @test_vssseg2_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1, v0.t
@@ -2882,7 +2714,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i
 define void @test_vssseg3_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
@@ -2896,7 +2727,6 @@ entry:
 define void @test_vssseg3_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
@@ -2913,7 +2743,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv2i64(<vscale x 2 x i64>,<vscale x 2 x i
 define void @test_vssseg4_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -2928,7 +2757,6 @@ entry:
 define void @test_vssseg4_mask_nxv2i64(<vscale x 2 x i64> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -2946,7 +2774,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv16f16(<vscale x 16 x half>,<vscale x 16
 define void @test_vssseg2_nxv16f16(<vscale x 16 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -2959,7 +2786,6 @@ entry:
 define void @test_vssseg2_mask_nxv16f16(<vscale x 16 x half> %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv16f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -2975,7 +2801,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv4f64(<vscale x 4 x double>,<vscale x 4
 define void @test_vssseg2_nxv4f64(<vscale x 4 x double> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1
@@ -2988,7 +2813,6 @@ entry:
 define void @test_vssseg2_mask_nxv4f64(<vscale x 4 x double> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv4f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1, v0.t
@@ -3004,7 +2828,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1
 define void @test_vssseg2_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1
@@ -3017,7 +2840,6 @@ entry:
 define void @test_vssseg2_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1, v0.t
@@ -3033,7 +2855,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1
 define void @test_vssseg3_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
@@ -3047,7 +2868,6 @@ entry:
 define void @test_vssseg3_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, ma
@@ -3064,7 +2884,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1
 define void @test_vssseg4_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3079,7 +2898,6 @@ entry:
 define void @test_vssseg4_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3097,7 +2915,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1
 define void @test_vssseg5_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3113,7 +2930,6 @@ entry:
 define void @test_vssseg5_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3132,7 +2948,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1
 define void @test_vssseg6_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3149,7 +2964,6 @@ entry:
 define void @test_vssseg6_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3169,7 +2983,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1
 define void @test_vssseg7_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3187,7 +3000,6 @@ entry:
 define void @test_vssseg7_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3208,7 +3020,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv1f64(<vscale x 1 x double>,<vscale x 1
 define void @test_vssseg8_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3227,7 +3038,6 @@ entry:
 define void @test_vssseg8_mask_nxv1f64(<vscale x 1 x double> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv1f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3249,7 +3059,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vssseg2_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -3262,7 +3071,6 @@ entry:
 define void @test_vssseg2_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -3278,7 +3086,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vssseg3_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -3292,7 +3099,6 @@ entry:
 define void @test_vssseg3_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m1, ta, ma
@@ -3309,7 +3115,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vssseg4_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3324,7 +3129,6 @@ entry:
 define void @test_vssseg4_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3342,7 +3146,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vssseg5_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3358,7 +3161,6 @@ entry:
 define void @test_vssseg5_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3377,7 +3179,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vssseg6_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3394,7 +3195,6 @@ entry:
 define void @test_vssseg6_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3414,7 +3214,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vssseg7_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3432,7 +3231,6 @@ entry:
 define void @test_vssseg7_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3453,7 +3251,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv2f32(<vscale x 2 x float>,<vscale x 2 x
 define void @test_vssseg8_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3472,7 +3269,6 @@ entry:
 define void @test_vssseg8_mask_nxv2f32(<vscale x 2 x float> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv2f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3494,7 +3290,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x
 define void @test_vssseg2_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -3507,7 +3302,6 @@ entry:
 define void @test_vssseg2_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -3523,7 +3317,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x
 define void @test_vssseg3_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
@@ -3537,7 +3330,6 @@ entry:
 define void @test_vssseg3_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf4, ta, ma
@@ -3554,7 +3346,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x
 define void @test_vssseg4_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3569,7 +3360,6 @@ entry:
 define void @test_vssseg4_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3587,7 +3377,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x
 define void @test_vssseg5_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3603,7 +3392,6 @@ entry:
 define void @test_vssseg5_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3622,7 +3410,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x
 define void @test_vssseg6_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3639,7 +3426,6 @@ entry:
 define void @test_vssseg6_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3659,7 +3445,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x
 define void @test_vssseg7_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3677,7 +3462,6 @@ entry:
 define void @test_vssseg7_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3698,7 +3482,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv1f16(<vscale x 1 x half>,<vscale x 1 x
 define void @test_vssseg8_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3717,7 +3500,6 @@ entry:
 define void @test_vssseg8_mask_nxv1f16(<vscale x 1 x half> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv1f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3739,7 +3521,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vssseg2_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -3752,7 +3533,6 @@ entry:
 define void @test_vssseg2_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -3768,7 +3548,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vssseg3_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
@@ -3782,7 +3561,6 @@ entry:
 define void @test_vssseg3_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, mf2, ta, ma
@@ -3799,7 +3577,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vssseg4_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3814,7 +3591,6 @@ entry:
 define void @test_vssseg4_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3832,7 +3608,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vssseg5_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3848,7 +3623,6 @@ entry:
 define void @test_vssseg5_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3867,7 +3641,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vssseg6_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3884,7 +3657,6 @@ entry:
 define void @test_vssseg6_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3904,7 +3676,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vssseg7_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3922,7 +3693,6 @@ entry:
 define void @test_vssseg7_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3943,7 +3713,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv1f32(<vscale x 1 x float>,<vscale x 1 x
 define void @test_vssseg8_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3962,7 +3731,6 @@ entry:
 define void @test_vssseg8_mask_nxv1f32(<vscale x 1 x float> %val, ptr %base, i64 %offset, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv1f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3984,7 +3752,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x
 define void @test_vssseg2_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -3997,7 +3764,6 @@ entry:
 define void @test_vssseg2_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -4013,7 +3779,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x
 define void @test_vssseg3_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
@@ -4027,7 +3792,6 @@ entry:
 define void @test_vssseg3_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m2, ta, ma
@@ -4044,7 +3808,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv8f16(<vscale x 8 x half>,<vscale x 8 x
 define void @test_vssseg4_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -4059,7 +3822,6 @@ entry:
 define void @test_vssseg4_mask_nxv8f16(<vscale x 8 x half> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv8f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -4077,7 +3839,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv8f32(<vscale x 8 x float>,<vscale x 8 x
 define void @test_vssseg2_nxv8f32(<vscale x 8 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -4090,7 +3851,6 @@ entry:
 define void @test_vssseg2_mask_nxv8f32(<vscale x 8 x float> %val, ptr %base, i64 %offset, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv8f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -4106,7 +3866,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2
 define void @test_vssseg2_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1
@@ -4119,7 +3878,6 @@ entry:
 define void @test_vssseg2_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e64.v v8, (a0), a1, v0.t
@@ -4135,7 +3893,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2
 define void @test_vssseg3_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
@@ -4149,7 +3906,6 @@ entry:
 define void @test_vssseg3_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, ma
@@ -4166,7 +3922,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv2f64(<vscale x 2 x double>,<vscale x 2
 define void @test_vssseg4_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -4181,7 +3936,6 @@ entry:
 define void @test_vssseg4_mask_nxv2f64(<vscale x 2 x double> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv2f64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -4199,7 +3953,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x
 define void @test_vssseg2_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -4212,7 +3965,6 @@ entry:
 define void @test_vssseg2_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -4228,7 +3980,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x
 define void @test_vssseg3_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
@@ -4242,7 +3993,6 @@ entry:
 define void @test_vssseg3_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m1, ta, ma
@@ -4259,7 +4009,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x
 define void @test_vssseg4_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4274,7 +4023,6 @@ entry:
 define void @test_vssseg4_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4292,7 +4040,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x
 define void @test_vssseg5_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4308,7 +4055,6 @@ entry:
 define void @test_vssseg5_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4327,7 +4073,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x
 define void @test_vssseg6_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4344,7 +4089,6 @@ entry:
 define void @test_vssseg6_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4364,7 +4108,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x
 define void @test_vssseg7_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4382,7 +4125,6 @@ entry:
 define void @test_vssseg7_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4403,7 +4145,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv4f16(<vscale x 4 x half>,<vscale x 4 x
 define void @test_vssseg8_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4422,7 +4163,6 @@ entry:
 define void @test_vssseg8_mask_nxv4f16(<vscale x 4 x half> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv4f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4444,7 +4184,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x
 define void @test_vssseg2_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -4457,7 +4196,6 @@ entry:
 define void @test_vssseg2_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -4473,7 +4211,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x
 define void @test_vssseg3_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
@@ -4487,7 +4224,6 @@ entry:
 define void @test_vssseg3_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e16, mf2, ta, ma
@@ -4504,7 +4240,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x
 define void @test_vssseg4_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4519,7 +4254,6 @@ entry:
 define void @test_vssseg4_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4537,7 +4271,6 @@ declare void @llvm.riscv.vssseg5.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x
 define void @test_vssseg5_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4553,7 +4286,6 @@ entry:
 define void @test_vssseg5_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg5_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4572,7 +4304,6 @@ declare void @llvm.riscv.vssseg6.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x
 define void @test_vssseg6_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4589,7 +4320,6 @@ entry:
 define void @test_vssseg6_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg6_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4609,7 +4339,6 @@ declare void @llvm.riscv.vssseg7.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x
 define void @test_vssseg7_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4627,7 +4356,6 @@ entry:
 define void @test_vssseg7_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg7_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4648,7 +4376,6 @@ declare void @llvm.riscv.vssseg8.mask.nxv2f16(<vscale x 2 x half>,<vscale x 2 x
 define void @test_vssseg8_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4667,7 +4394,6 @@ entry:
 define void @test_vssseg8_mask_nxv2f16(<vscale x 2 x half> %val, ptr %base, i64 %offset, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg8_mask_nxv2f16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -4689,7 +4415,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x
 define void @test_vssseg2_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1
@@ -4702,7 +4427,6 @@ entry:
 define void @test_vssseg2_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
 ; CHECK-NEXT:    vssseg2e32.v v8, (a0), a1, v0.t
@@ -4718,7 +4442,6 @@ declare void @llvm.riscv.vssseg3.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x
 define void @test_vssseg3_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
@@ -4732,7 +4455,6 @@ entry:
 define void @test_vssseg3_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg3_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m2, ta, ma
@@ -4749,7 +4471,6 @@ declare void @llvm.riscv.vssseg4.mask.nxv4f32(<vscale x 4 x float>,<vscale x 4 x
 define void @test_vssseg4_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %offset, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -4764,7 +4485,6 @@ entry:
 define void @test_vssseg4_mask_nxv4f32(<vscale x 4 x float> %val, ptr %base, i64 %offset, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vssseg4_mask_nxv4f32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll
index bbe7c8ac70cf6..c8a56f1ef7cce 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv32.ll
@@ -8,7 +8,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vs
 define void @test_vsuxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -22,7 +21,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -39,7 +37,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vsc
 define void @test_vsuxseg2_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -53,7 +50,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -70,7 +66,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vs
 define void @test_vsuxseg2_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
@@ -83,7 +78,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
@@ -99,7 +93,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x
 define void @test_vsuxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -113,7 +106,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -130,7 +122,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale
 define void @test_vsuxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -144,7 +135,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -161,7 +151,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale
 define void @test_vsuxseg2_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -175,7 +164,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -193,8 +181,8 @@ define void @test_vsuxseg3_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -207,8 +195,8 @@ define void @test_vsuxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -224,8 +212,8 @@ define void @test_vsuxseg3_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -238,8 +226,8 @@ define void @test_vsuxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -255,8 +243,8 @@ define void @test_vsuxseg3_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -269,8 +257,8 @@ define void @test_vsuxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -286,9 +274,9 @@ define void @test_vsuxseg4_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -301,9 +289,9 @@ define void @test_vsuxseg4_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -319,9 +307,9 @@ define void @test_vsuxseg4_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -334,9 +322,9 @@ define void @test_vsuxseg4_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -352,9 +340,9 @@ define void @test_vsuxseg4_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -367,9 +355,9 @@ define void @test_vsuxseg4_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -385,10 +373,10 @@ define void @test_vsuxseg5_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -401,10 +389,10 @@ define void @test_vsuxseg5_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -420,10 +408,10 @@ define void @test_vsuxseg5_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -436,10 +424,10 @@ define void @test_vsuxseg5_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -455,10 +443,10 @@ define void @test_vsuxseg5_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -471,10 +459,10 @@ define void @test_vsuxseg5_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -490,11 +478,11 @@ define void @test_vsuxseg6_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -507,11 +495,11 @@ define void @test_vsuxseg6_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -527,11 +515,11 @@ define void @test_vsuxseg6_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -544,11 +532,11 @@ define void @test_vsuxseg6_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -564,11 +552,11 @@ define void @test_vsuxseg6_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -581,11 +569,11 @@ define void @test_vsuxseg6_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -601,12 +589,12 @@ define void @test_vsuxseg7_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -619,12 +607,12 @@ define void @test_vsuxseg7_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -640,12 +628,12 @@ define void @test_vsuxseg7_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -658,12 +646,12 @@ define void @test_vsuxseg7_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -679,12 +667,12 @@ define void @test_vsuxseg7_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -697,12 +685,12 @@ define void @test_vsuxseg7_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -718,13 +706,13 @@ define void @test_vsuxseg8_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -737,13 +725,13 @@ define void @test_vsuxseg8_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -759,13 +747,13 @@ define void @test_vsuxseg8_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -778,13 +766,13 @@ define void @test_vsuxseg8_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -800,13 +788,13 @@ define void @test_vsuxseg8_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -819,13 +807,13 @@ define void @test_vsuxseg8_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -840,7 +828,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vsca
 define void @test_vsuxseg2_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
@@ -853,7 +840,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
@@ -869,7 +855,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscal
 define void @test_vsuxseg2_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -883,7 +868,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -900,7 +884,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vsca
 define void @test_vsuxseg2_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
@@ -913,7 +896,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
@@ -929,11 +911,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vsca
 define void @test_vsuxseg3_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei16.v v16, (a0), v12
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl)
@@ -943,11 +925,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei16.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl)
@@ -961,8 +943,8 @@ define void @test_vsuxseg3_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -975,8 +957,8 @@ define void @test_vsuxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -991,7 +973,6 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vsca
 define void @test_vsuxseg3_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1005,7 +986,6 @@ entry:
 define void @test_vsuxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -1023,9 +1003,9 @@ define void @test_vsuxseg4_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -1038,9 +1018,9 @@ define void @test_vsuxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -1056,9 +1036,9 @@ define void @test_vsuxseg4_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -1071,9 +1051,9 @@ define void @test_vsuxseg4_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -1088,7 +1068,6 @@ declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vsca
 define void @test_vsuxseg4_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -1103,7 +1082,6 @@ entry:
 define void @test_vsuxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -1121,7 +1099,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscal
 define void @test_vsuxseg2_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1135,7 +1112,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1152,7 +1128,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale
 define void @test_vsuxseg2_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1166,7 +1141,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1183,7 +1157,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscal
 define void @test_vsuxseg2_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1197,7 +1170,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -1215,8 +1187,8 @@ define void @test_vsuxseg3_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1229,8 +1201,8 @@ define void @test_vsuxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1246,8 +1218,8 @@ define void @test_vsuxseg3_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1260,8 +1232,8 @@ define void @test_vsuxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1277,8 +1249,8 @@ define void @test_vsuxseg3_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1291,8 +1263,8 @@ define void @test_vsuxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1308,9 +1280,9 @@ define void @test_vsuxseg4_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1323,9 +1295,9 @@ define void @test_vsuxseg4_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1341,9 +1313,9 @@ define void @test_vsuxseg4_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1356,9 +1328,9 @@ define void @test_vsuxseg4_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1374,9 +1346,9 @@ define void @test_vsuxseg4_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1389,9 +1361,9 @@ define void @test_vsuxseg4_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1407,10 +1379,10 @@ define void @test_vsuxseg5_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1423,10 +1395,10 @@ define void @test_vsuxseg5_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1442,10 +1414,10 @@ define void @test_vsuxseg5_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1458,10 +1430,10 @@ define void @test_vsuxseg5_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1477,10 +1449,10 @@ define void @test_vsuxseg5_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1493,10 +1465,10 @@ define void @test_vsuxseg5_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1512,11 +1484,11 @@ define void @test_vsuxseg6_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1529,11 +1501,11 @@ define void @test_vsuxseg6_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1549,11 +1521,11 @@ define void @test_vsuxseg6_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1566,11 +1538,11 @@ define void @test_vsuxseg6_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1586,11 +1558,11 @@ define void @test_vsuxseg6_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1603,11 +1575,11 @@ define void @test_vsuxseg6_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1623,12 +1595,12 @@ define void @test_vsuxseg7_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1641,12 +1613,12 @@ define void @test_vsuxseg7_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1662,12 +1634,12 @@ define void @test_vsuxseg7_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1680,12 +1652,12 @@ define void @test_vsuxseg7_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1701,12 +1673,12 @@ define void @test_vsuxseg7_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1719,12 +1691,12 @@ define void @test_vsuxseg7_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1740,13 +1712,13 @@ define void @test_vsuxseg8_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1759,13 +1731,13 @@ define void @test_vsuxseg8_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1781,13 +1753,13 @@ define void @test_vsuxseg8_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1800,13 +1772,13 @@ define void @test_vsuxseg8_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1822,13 +1794,13 @@ define void @test_vsuxseg8_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1841,13 +1813,13 @@ define void @test_vsuxseg8_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1862,7 +1834,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscal
 define void @test_vsuxseg2_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1876,7 +1847,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1893,7 +1863,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale
 define void @test_vsuxseg2_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1907,7 +1876,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -1924,7 +1892,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscal
 define void @test_vsuxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
@@ -1937,7 +1904,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
@@ -1954,8 +1920,8 @@ define void @test_vsuxseg3_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1968,8 +1934,8 @@ define void @test_vsuxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1985,8 +1951,8 @@ define void @test_vsuxseg3_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1999,8 +1965,8 @@ define void @test_vsuxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2015,11 +1981,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscal
 define void @test_vsuxseg3_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
@@ -2029,11 +1995,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
@@ -2047,9 +2013,9 @@ define void @test_vsuxseg4_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2062,9 +2028,9 @@ define void @test_vsuxseg4_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2080,9 +2046,9 @@ define void @test_vsuxseg4_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2095,9 +2061,9 @@ define void @test_vsuxseg4_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2113,9 +2079,9 @@ define void @test_vsuxseg4_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2128,9 +2094,9 @@ define void @test_vsuxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -2146,10 +2112,10 @@ define void @test_vsuxseg5_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2162,10 +2128,10 @@ define void @test_vsuxseg5_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2181,10 +2147,10 @@ define void @test_vsuxseg5_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2197,10 +2163,10 @@ define void @test_vsuxseg5_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2216,10 +2182,10 @@ define void @test_vsuxseg5_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2232,10 +2198,10 @@ define void @test_vsuxseg5_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -2251,11 +2217,11 @@ define void @test_vsuxseg6_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2268,11 +2234,11 @@ define void @test_vsuxseg6_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2288,11 +2254,11 @@ define void @test_vsuxseg6_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2305,11 +2271,11 @@ define void @test_vsuxseg6_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2325,11 +2291,11 @@ define void @test_vsuxseg6_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2342,11 +2308,11 @@ define void @test_vsuxseg6_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -2362,12 +2328,12 @@ define void @test_vsuxseg7_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2380,12 +2346,12 @@ define void @test_vsuxseg7_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2401,12 +2367,12 @@ define void @test_vsuxseg7_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2419,12 +2385,12 @@ define void @test_vsuxseg7_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2440,12 +2406,12 @@ define void @test_vsuxseg7_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2458,12 +2424,12 @@ define void @test_vsuxseg7_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -2479,13 +2445,13 @@ define void @test_vsuxseg8_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2498,13 +2464,13 @@ define void @test_vsuxseg8_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2520,13 +2486,13 @@ define void @test_vsuxseg8_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2539,13 +2505,13 @@ define void @test_vsuxseg8_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2561,13 +2527,13 @@ define void @test_vsuxseg8_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2580,13 +2546,13 @@ define void @test_vsuxseg8_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -2601,7 +2567,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale
 define void @test_vsuxseg2_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2615,7 +2580,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2632,7 +2596,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscal
 define void @test_vsuxseg2_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2646,7 +2609,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2663,7 +2625,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscal
 define void @test_vsuxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2677,7 +2638,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -2695,8 +2655,8 @@ define void @test_vsuxseg3_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2709,8 +2669,8 @@ define void @test_vsuxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2726,8 +2686,8 @@ define void @test_vsuxseg3_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2740,8 +2700,8 @@ define void @test_vsuxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2757,8 +2717,8 @@ define void @test_vsuxseg3_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2771,8 +2731,8 @@ define void @test_vsuxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2788,9 +2748,9 @@ define void @test_vsuxseg4_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2803,9 +2763,9 @@ define void @test_vsuxseg4_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2821,9 +2781,9 @@ define void @test_vsuxseg4_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2836,9 +2796,9 @@ define void @test_vsuxseg4_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2854,9 +2814,9 @@ define void @test_vsuxseg4_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2869,9 +2829,9 @@ define void @test_vsuxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2887,10 +2847,10 @@ define void @test_vsuxseg5_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2903,10 +2863,10 @@ define void @test_vsuxseg5_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2922,10 +2882,10 @@ define void @test_vsuxseg5_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2938,10 +2898,10 @@ define void @test_vsuxseg5_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2957,10 +2917,10 @@ define void @test_vsuxseg5_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2973,10 +2933,10 @@ define void @test_vsuxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2992,11 +2952,11 @@ define void @test_vsuxseg6_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3009,11 +2969,11 @@ define void @test_vsuxseg6_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3029,11 +2989,11 @@ define void @test_vsuxseg6_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3046,11 +3006,11 @@ define void @test_vsuxseg6_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3066,11 +3026,11 @@ define void @test_vsuxseg6_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3083,11 +3043,11 @@ define void @test_vsuxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3103,12 +3063,12 @@ define void @test_vsuxseg7_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3121,12 +3081,12 @@ define void @test_vsuxseg7_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3142,12 +3102,12 @@ define void @test_vsuxseg7_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3160,12 +3120,12 @@ define void @test_vsuxseg7_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3181,12 +3141,12 @@ define void @test_vsuxseg7_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3199,12 +3159,12 @@ define void @test_vsuxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3220,13 +3180,13 @@ define void @test_vsuxseg8_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3239,13 +3199,13 @@ define void @test_vsuxseg8_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3261,13 +3221,13 @@ define void @test_vsuxseg8_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3280,13 +3240,13 @@ define void @test_vsuxseg8_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3302,13 +3262,13 @@ define void @test_vsuxseg8_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3321,13 +3281,13 @@ define void @test_vsuxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3342,7 +3302,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscal
 define void @test_vsuxseg2_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3356,7 +3315,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3373,7 +3331,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale
 define void @test_vsuxseg2_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3387,7 +3344,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -3404,7 +3360,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscal
 define void @test_vsuxseg2_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
@@ -3417,7 +3372,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
@@ -3434,8 +3388,8 @@ define void @test_vsuxseg3_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3448,8 +3402,8 @@ define void @test_vsuxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3465,8 +3419,8 @@ define void @test_vsuxseg3_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3479,8 +3433,8 @@ define void @test_vsuxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3495,11 +3449,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscal
 define void @test_vsuxseg3_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
@@ -3509,11 +3463,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
@@ -3527,10 +3481,10 @@ define void @test_vsuxseg4_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
 entry:
@@ -3542,9 +3496,9 @@ define void @test_vsuxseg4_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3560,9 +3514,9 @@ define void @test_vsuxseg4_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3575,9 +3529,9 @@ define void @test_vsuxseg4_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3593,9 +3547,9 @@ define void @test_vsuxseg4_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -3608,9 +3562,9 @@ define void @test_vsuxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -3625,7 +3579,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg2_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
@@ -3638,7 +3591,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
@@ -3654,7 +3606,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x
 define void @test_vsuxseg2_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -3668,7 +3619,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -3685,7 +3635,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg2_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
@@ -3698,7 +3647,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
@@ -3714,11 +3662,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg3_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl)
@@ -3728,11 +3676,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl)
@@ -3746,8 +3694,8 @@ define void @test_vsuxseg3_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3760,8 +3708,8 @@ define void @test_vsuxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3776,7 +3724,6 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg3_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -3790,7 +3737,6 @@ entry:
 define void @test_vsuxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -3808,9 +3754,9 @@ define void @test_vsuxseg4_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3823,9 +3769,9 @@ define void @test_vsuxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3841,9 +3787,9 @@ define void @test_vsuxseg4_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3856,9 +3802,9 @@ define void @test_vsuxseg4_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3873,7 +3819,6 @@ declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg4_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3888,7 +3833,6 @@ entry:
 define void @test_vsuxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3907,10 +3851,10 @@ define void @test_vsuxseg5_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3923,10 +3867,10 @@ define void @test_vsuxseg5_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3942,10 +3886,10 @@ define void @test_vsuxseg5_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3958,10 +3902,10 @@ define void @test_vsuxseg5_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3976,13 +3920,13 @@ declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg5_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsuxseg5ei32.v v16, (a0), v12
+; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
@@ -3992,13 +3936,13 @@ entry:
 define void @test_vsuxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsuxseg5ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
@@ -4012,11 +3956,11 @@ define void @test_vsuxseg6_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -4029,11 +3973,11 @@ define void @test_vsuxseg6_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -4049,11 +3993,11 @@ define void @test_vsuxseg6_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4066,11 +4010,11 @@ define void @test_vsuxseg6_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4086,11 +4030,11 @@ define void @test_vsuxseg6_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -4103,11 +4047,11 @@ define void @test_vsuxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -4123,12 +4067,12 @@ define void @test_vsuxseg7_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -4141,12 +4085,12 @@ define void @test_vsuxseg7_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -4162,12 +4106,12 @@ define void @test_vsuxseg7_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4180,12 +4124,12 @@ define void @test_vsuxseg7_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4201,12 +4145,12 @@ define void @test_vsuxseg7_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -4219,12 +4163,12 @@ define void @test_vsuxseg7_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -4240,13 +4184,13 @@ define void @test_vsuxseg8_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -4259,13 +4203,13 @@ define void @test_vsuxseg8_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -4281,13 +4225,13 @@ define void @test_vsuxseg8_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4300,13 +4244,13 @@ define void @test_vsuxseg8_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4322,13 +4266,13 @@ define void @test_vsuxseg8_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -4341,13 +4285,13 @@ define void @test_vsuxseg8_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -4362,7 +4306,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32>,<vscal
 define void @test_vsuxseg2_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4376,7 +4319,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4393,7 +4335,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32>,<vscale
 define void @test_vsuxseg2_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4407,7 +4348,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4424,7 +4364,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32>,<vscal
 define void @test_vsuxseg2_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4438,7 +4377,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -4455,7 +4393,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale
 define void @test_vsuxseg2_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -4469,7 +4406,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -4486,7 +4422,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x
 define void @test_vsuxseg2_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -4500,7 +4435,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -4517,7 +4451,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale
 define void @test_vsuxseg2_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
@@ -4530,7 +4463,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
@@ -4547,8 +4479,8 @@ define void @test_vsuxseg3_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4561,8 +4493,8 @@ define void @test_vsuxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4578,8 +4510,8 @@ define void @test_vsuxseg3_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4592,8 +4524,8 @@ define void @test_vsuxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4608,11 +4540,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale
 define void @test_vsuxseg3_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
@@ -4622,11 +4554,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
@@ -4640,9 +4572,9 @@ define void @test_vsuxseg4_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4655,9 +4587,9 @@ define void @test_vsuxseg4_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4673,9 +4605,9 @@ define void @test_vsuxseg4_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4688,9 +4620,9 @@ define void @test_vsuxseg4_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4706,9 +4638,9 @@ define void @test_vsuxseg4_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -4721,9 +4653,9 @@ define void @test_vsuxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -4739,10 +4671,10 @@ define void @test_vsuxseg5_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4755,10 +4687,10 @@ define void @test_vsuxseg5_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4774,10 +4706,10 @@ define void @test_vsuxseg5_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4790,10 +4722,10 @@ define void @test_vsuxseg5_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4809,10 +4741,10 @@ define void @test_vsuxseg5_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -4825,10 +4757,10 @@ define void @test_vsuxseg5_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -4844,11 +4776,11 @@ define void @test_vsuxseg6_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4861,11 +4793,11 @@ define void @test_vsuxseg6_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4881,11 +4813,11 @@ define void @test_vsuxseg6_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4898,11 +4830,11 @@ define void @test_vsuxseg6_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4918,11 +4850,11 @@ define void @test_vsuxseg6_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -4935,11 +4867,11 @@ define void @test_vsuxseg6_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -4955,12 +4887,12 @@ define void @test_vsuxseg7_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4973,12 +4905,12 @@ define void @test_vsuxseg7_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4994,12 +4926,12 @@ define void @test_vsuxseg7_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5012,12 +4944,12 @@ define void @test_vsuxseg7_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5033,12 +4965,12 @@ define void @test_vsuxseg7_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -5051,12 +4983,12 @@ define void @test_vsuxseg7_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -5072,13 +5004,13 @@ define void @test_vsuxseg8_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5091,13 +5023,13 @@ define void @test_vsuxseg8_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5113,13 +5045,13 @@ define void @test_vsuxseg8_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5132,13 +5064,13 @@ define void @test_vsuxseg8_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5154,13 +5086,13 @@ define void @test_vsuxseg8_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -5173,13 +5105,13 @@ define void @test_vsuxseg8_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -5194,7 +5126,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale
 define void @test_vsuxseg2_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -5208,7 +5139,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -5225,7 +5155,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscal
 define void @test_vsuxseg2_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -5239,7 +5168,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -5256,7 +5184,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscal
 define void @test_vsuxseg2_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -5270,7 +5197,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -5288,8 +5214,8 @@ define void @test_vsuxseg3_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5302,8 +5228,8 @@ define void @test_vsuxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5319,8 +5245,8 @@ define void @test_vsuxseg3_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5333,8 +5259,8 @@ define void @test_vsuxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5350,8 +5276,8 @@ define void @test_vsuxseg3_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5364,8 +5290,8 @@ define void @test_vsuxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5381,9 +5307,9 @@ define void @test_vsuxseg4_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5396,9 +5322,9 @@ define void @test_vsuxseg4_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5414,9 +5340,9 @@ define void @test_vsuxseg4_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5429,9 +5355,9 @@ define void @test_vsuxseg4_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5447,9 +5373,9 @@ define void @test_vsuxseg4_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5462,9 +5388,9 @@ define void @test_vsuxseg4_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5480,10 +5406,10 @@ define void @test_vsuxseg5_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5496,10 +5422,10 @@ define void @test_vsuxseg5_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5515,10 +5441,10 @@ define void @test_vsuxseg5_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5531,10 +5457,10 @@ define void @test_vsuxseg5_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5550,10 +5476,10 @@ define void @test_vsuxseg5_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5566,10 +5492,10 @@ define void @test_vsuxseg5_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5585,11 +5511,11 @@ define void @test_vsuxseg6_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5602,11 +5528,11 @@ define void @test_vsuxseg6_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5622,11 +5548,11 @@ define void @test_vsuxseg6_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5639,11 +5565,11 @@ define void @test_vsuxseg6_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5659,11 +5585,11 @@ define void @test_vsuxseg6_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5676,11 +5602,11 @@ define void @test_vsuxseg6_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5696,12 +5622,12 @@ define void @test_vsuxseg7_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5714,12 +5640,12 @@ define void @test_vsuxseg7_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5735,12 +5661,12 @@ define void @test_vsuxseg7_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5753,12 +5679,12 @@ define void @test_vsuxseg7_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5774,12 +5700,12 @@ define void @test_vsuxseg7_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5792,12 +5718,12 @@ define void @test_vsuxseg7_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5813,13 +5739,13 @@ define void @test_vsuxseg8_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5832,13 +5758,13 @@ define void @test_vsuxseg8_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5854,13 +5780,13 @@ define void @test_vsuxseg8_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5873,13 +5799,13 @@ define void @test_vsuxseg8_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5895,13 +5821,13 @@ define void @test_vsuxseg8_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5914,13 +5840,13 @@ define void @test_vsuxseg8_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5935,7 +5861,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8>,<vsca
 define void @test_vsuxseg2_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
@@ -5948,7 +5873,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
@@ -5964,7 +5888,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8>,<vscal
 define void @test_vsuxseg2_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -5978,7 +5901,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -5995,7 +5917,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale
 define void @test_vsuxseg2_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -6009,7 +5930,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -6026,7 +5946,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x
 define void @test_vsuxseg2_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -6040,7 +5959,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -6057,7 +5975,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale
 define void @test_vsuxseg2_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -6071,7 +5988,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -6089,8 +6005,8 @@ define void @test_vsuxseg3_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6103,8 +6019,8 @@ define void @test_vsuxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6120,8 +6036,8 @@ define void @test_vsuxseg3_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6134,8 +6050,8 @@ define void @test_vsuxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6151,8 +6067,8 @@ define void @test_vsuxseg3_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6165,8 +6081,8 @@ define void @test_vsuxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6182,9 +6098,9 @@ define void @test_vsuxseg4_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6197,9 +6113,9 @@ define void @test_vsuxseg4_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6215,9 +6131,9 @@ define void @test_vsuxseg4_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6230,9 +6146,9 @@ define void @test_vsuxseg4_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6248,9 +6164,9 @@ define void @test_vsuxseg4_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6263,9 +6179,9 @@ define void @test_vsuxseg4_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6281,10 +6197,10 @@ define void @test_vsuxseg5_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6297,10 +6213,10 @@ define void @test_vsuxseg5_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6316,10 +6232,10 @@ define void @test_vsuxseg5_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6332,10 +6248,10 @@ define void @test_vsuxseg5_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6351,10 +6267,10 @@ define void @test_vsuxseg5_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6367,10 +6283,10 @@ define void @test_vsuxseg5_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6386,11 +6302,11 @@ define void @test_vsuxseg6_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6403,11 +6319,11 @@ define void @test_vsuxseg6_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6423,11 +6339,11 @@ define void @test_vsuxseg6_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6440,11 +6356,11 @@ define void @test_vsuxseg6_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6460,11 +6376,11 @@ define void @test_vsuxseg6_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6477,11 +6393,11 @@ define void @test_vsuxseg6_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6497,12 +6413,12 @@ define void @test_vsuxseg7_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6515,12 +6431,12 @@ define void @test_vsuxseg7_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6536,12 +6452,12 @@ define void @test_vsuxseg7_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6554,12 +6470,12 @@ define void @test_vsuxseg7_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6575,12 +6491,12 @@ define void @test_vsuxseg7_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6593,12 +6509,12 @@ define void @test_vsuxseg7_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6614,13 +6530,13 @@ define void @test_vsuxseg8_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6633,13 +6549,13 @@ define void @test_vsuxseg8_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6655,13 +6571,13 @@ define void @test_vsuxseg8_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6674,13 +6590,13 @@ define void @test_vsuxseg8_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6696,13 +6612,13 @@ define void @test_vsuxseg8_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6715,13 +6631,13 @@ define void @test_vsuxseg8_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6736,7 +6652,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscal
 define void @test_vsuxseg2_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -6750,7 +6665,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -6767,7 +6681,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale
 define void @test_vsuxseg2_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -6781,7 +6694,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -6798,7 +6710,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscal
 define void @test_vsuxseg2_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -6812,7 +6723,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -6830,8 +6740,8 @@ define void @test_vsuxseg3_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6844,8 +6754,8 @@ define void @test_vsuxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6861,8 +6771,8 @@ define void @test_vsuxseg3_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6875,8 +6785,8 @@ define void @test_vsuxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6892,8 +6802,8 @@ define void @test_vsuxseg3_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6906,8 +6816,8 @@ define void @test_vsuxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6923,9 +6833,9 @@ define void @test_vsuxseg4_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6938,9 +6848,9 @@ define void @test_vsuxseg4_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6956,9 +6866,9 @@ define void @test_vsuxseg4_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6971,9 +6881,9 @@ define void @test_vsuxseg4_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6989,9 +6899,9 @@ define void @test_vsuxseg4_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7004,9 +6914,9 @@ define void @test_vsuxseg4_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7022,10 +6932,10 @@ define void @test_vsuxseg5_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7038,10 +6948,10 @@ define void @test_vsuxseg5_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7057,10 +6967,10 @@ define void @test_vsuxseg5_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7073,10 +6983,10 @@ define void @test_vsuxseg5_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7092,10 +7002,10 @@ define void @test_vsuxseg5_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7108,10 +7018,10 @@ define void @test_vsuxseg5_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7127,11 +7037,11 @@ define void @test_vsuxseg6_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7144,11 +7054,11 @@ define void @test_vsuxseg6_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7164,11 +7074,11 @@ define void @test_vsuxseg6_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7181,11 +7091,11 @@ define void @test_vsuxseg6_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7201,11 +7111,11 @@ define void @test_vsuxseg6_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7218,11 +7128,11 @@ define void @test_vsuxseg6_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7238,13 +7148,13 @@ define void @test_vsuxseg7_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
 entry:
@@ -7256,12 +7166,12 @@ define void @test_vsuxseg7_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7277,12 +7187,12 @@ define void @test_vsuxseg7_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7295,12 +7205,12 @@ define void @test_vsuxseg7_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7316,12 +7226,12 @@ define void @test_vsuxseg7_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7334,12 +7244,12 @@ define void @test_vsuxseg7_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7355,13 +7265,13 @@ define void @test_vsuxseg8_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7374,13 +7284,13 @@ define void @test_vsuxseg8_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7396,13 +7306,13 @@ define void @test_vsuxseg8_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7415,13 +7325,13 @@ define void @test_vsuxseg8_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7437,13 +7347,13 @@ define void @test_vsuxseg8_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7456,13 +7366,13 @@ define void @test_vsuxseg8_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7477,7 +7387,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscal
 define void @test_vsuxseg2_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -7491,7 +7400,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -7508,7 +7416,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale
 define void @test_vsuxseg2_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -7522,7 +7429,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -7539,7 +7445,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscal
 define void @test_vsuxseg2_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -7553,7 +7458,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -7571,8 +7475,8 @@ define void @test_vsuxseg3_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7585,8 +7489,8 @@ define void @test_vsuxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7602,8 +7506,8 @@ define void @test_vsuxseg3_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7616,8 +7520,8 @@ define void @test_vsuxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7633,8 +7537,8 @@ define void @test_vsuxseg3_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7647,8 +7551,8 @@ define void @test_vsuxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7664,9 +7568,9 @@ define void @test_vsuxseg4_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7679,9 +7583,9 @@ define void @test_vsuxseg4_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7697,9 +7601,9 @@ define void @test_vsuxseg4_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7712,9 +7616,9 @@ define void @test_vsuxseg4_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7730,9 +7634,9 @@ define void @test_vsuxseg4_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7745,9 +7649,9 @@ define void @test_vsuxseg4_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7762,7 +7666,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half>,<v
 define void @test_vsuxseg2_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -7776,7 +7679,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -7793,7 +7695,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half>,<vs
 define void @test_vsuxseg2_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -7807,7 +7708,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -7824,7 +7724,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half>,<v
 define void @test_vsuxseg2_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
@@ -7837,7 +7736,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
@@ -7853,7 +7751,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double>,<vs
 define void @test_vsuxseg2_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7867,7 +7764,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7884,7 +7780,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double>,<vsc
 define void @test_vsuxseg2_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7898,7 +7793,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7915,7 +7809,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double>,<vs
 define void @test_vsuxseg2_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7929,7 +7822,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7946,7 +7838,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vsc
 define void @test_vsuxseg2_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -7960,7 +7851,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -7977,7 +7867,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vs
 define void @test_vsuxseg2_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -7991,7 +7880,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -8008,7 +7896,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vs
 define void @test_vsuxseg2_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -8022,7 +7909,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -8040,8 +7926,8 @@ define void @test_vsuxseg3_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8054,8 +7940,8 @@ define void @test_vsuxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8071,8 +7957,8 @@ define void @test_vsuxseg3_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8085,8 +7971,8 @@ define void @test_vsuxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8102,8 +7988,8 @@ define void @test_vsuxseg3_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8116,8 +8002,8 @@ define void @test_vsuxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8133,9 +8019,9 @@ define void @test_vsuxseg4_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8148,9 +8034,9 @@ define void @test_vsuxseg4_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8166,9 +8052,9 @@ define void @test_vsuxseg4_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8181,9 +8067,9 @@ define void @test_vsuxseg4_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8199,9 +8085,9 @@ define void @test_vsuxseg4_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8214,9 +8100,9 @@ define void @test_vsuxseg4_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8232,10 +8118,10 @@ define void @test_vsuxseg5_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8248,10 +8134,10 @@ define void @test_vsuxseg5_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8267,10 +8153,10 @@ define void @test_vsuxseg5_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8283,10 +8169,10 @@ define void @test_vsuxseg5_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8302,10 +8188,10 @@ define void @test_vsuxseg5_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8318,10 +8204,10 @@ define void @test_vsuxseg5_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8337,11 +8223,11 @@ define void @test_vsuxseg6_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8354,11 +8240,11 @@ define void @test_vsuxseg6_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8374,11 +8260,11 @@ define void @test_vsuxseg6_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8391,11 +8277,11 @@ define void @test_vsuxseg6_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8411,11 +8297,11 @@ define void @test_vsuxseg6_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8428,11 +8314,11 @@ define void @test_vsuxseg6_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8448,12 +8334,12 @@ define void @test_vsuxseg7_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8466,12 +8352,12 @@ define void @test_vsuxseg7_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8487,12 +8373,12 @@ define void @test_vsuxseg7_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8505,12 +8391,12 @@ define void @test_vsuxseg7_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8526,12 +8412,12 @@ define void @test_vsuxseg7_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8544,12 +8430,12 @@ define void @test_vsuxseg7_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8565,13 +8451,13 @@ define void @test_vsuxseg8_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8584,13 +8470,13 @@ define void @test_vsuxseg8_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8606,13 +8492,13 @@ define void @test_vsuxseg8_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8625,13 +8511,13 @@ define void @test_vsuxseg8_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8647,13 +8533,13 @@ define void @test_vsuxseg8_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8666,13 +8552,13 @@ define void @test_vsuxseg8_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8687,7 +8573,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vsc
 define void @test_vsuxseg2_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -8701,7 +8586,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -8718,7 +8602,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vsca
 define void @test_vsuxseg2_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -8732,7 +8615,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -8749,7 +8631,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vsc
 define void @test_vsuxseg2_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -8763,7 +8644,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -8781,8 +8661,8 @@ define void @test_vsuxseg3_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8795,8 +8675,8 @@ define void @test_vsuxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8812,8 +8692,8 @@ define void @test_vsuxseg3_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8826,8 +8706,8 @@ define void @test_vsuxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8843,8 +8723,8 @@ define void @test_vsuxseg3_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8857,8 +8737,8 @@ define void @test_vsuxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8874,9 +8754,9 @@ define void @test_vsuxseg4_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8889,9 +8769,9 @@ define void @test_vsuxseg4_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8907,9 +8787,9 @@ define void @test_vsuxseg4_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8922,9 +8802,9 @@ define void @test_vsuxseg4_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8940,9 +8820,9 @@ define void @test_vsuxseg4_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8955,9 +8835,9 @@ define void @test_vsuxseg4_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8973,10 +8853,10 @@ define void @test_vsuxseg5_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8989,10 +8869,10 @@ define void @test_vsuxseg5_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9008,10 +8888,10 @@ define void @test_vsuxseg5_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9024,10 +8904,10 @@ define void @test_vsuxseg5_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9043,10 +8923,10 @@ define void @test_vsuxseg5_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9059,10 +8939,10 @@ define void @test_vsuxseg5_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9078,11 +8958,11 @@ define void @test_vsuxseg6_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9095,11 +8975,11 @@ define void @test_vsuxseg6_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9115,11 +8995,11 @@ define void @test_vsuxseg6_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9132,11 +9012,11 @@ define void @test_vsuxseg6_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9152,11 +9032,11 @@ define void @test_vsuxseg6_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9169,11 +9049,11 @@ define void @test_vsuxseg6_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9189,12 +9069,12 @@ define void @test_vsuxseg7_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9207,12 +9087,12 @@ define void @test_vsuxseg7_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9228,12 +9108,12 @@ define void @test_vsuxseg7_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9246,12 +9126,12 @@ define void @test_vsuxseg7_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9267,12 +9147,12 @@ define void @test_vsuxseg7_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9285,12 +9165,12 @@ define void @test_vsuxseg7_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9306,13 +9186,13 @@ define void @test_vsuxseg8_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9325,13 +9205,13 @@ define void @test_vsuxseg8_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9347,13 +9227,13 @@ define void @test_vsuxseg8_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9366,13 +9246,13 @@ define void @test_vsuxseg8_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9388,13 +9268,13 @@ define void @test_vsuxseg8_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9407,13 +9287,13 @@ define void @test_vsuxseg8_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9428,7 +9308,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscal
 define void @test_vsuxseg2_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -9442,7 +9321,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -9459,7 +9337,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vsca
 define void @test_vsuxseg2_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -9473,7 +9350,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -9490,7 +9366,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vsca
 define void @test_vsuxseg2_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -9504,7 +9379,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -9522,8 +9396,8 @@ define void @test_vsuxseg3_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9536,8 +9410,8 @@ define void @test_vsuxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9553,8 +9427,8 @@ define void @test_vsuxseg3_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9567,8 +9441,8 @@ define void @test_vsuxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9584,8 +9458,8 @@ define void @test_vsuxseg3_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9598,8 +9472,8 @@ define void @test_vsuxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9615,9 +9489,9 @@ define void @test_vsuxseg4_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9630,9 +9504,9 @@ define void @test_vsuxseg4_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9648,9 +9522,9 @@ define void @test_vsuxseg4_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9663,9 +9537,9 @@ define void @test_vsuxseg4_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9681,9 +9555,9 @@ define void @test_vsuxseg4_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9696,9 +9570,9 @@ define void @test_vsuxseg4_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9714,10 +9588,10 @@ define void @test_vsuxseg5_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9730,10 +9604,10 @@ define void @test_vsuxseg5_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9749,10 +9623,10 @@ define void @test_vsuxseg5_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9765,10 +9639,10 @@ define void @test_vsuxseg5_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9784,10 +9658,10 @@ define void @test_vsuxseg5_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9800,10 +9674,10 @@ define void @test_vsuxseg5_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9819,11 +9693,11 @@ define void @test_vsuxseg6_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9836,11 +9710,11 @@ define void @test_vsuxseg6_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9856,11 +9730,11 @@ define void @test_vsuxseg6_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9873,11 +9747,11 @@ define void @test_vsuxseg6_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9893,11 +9767,11 @@ define void @test_vsuxseg6_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9910,11 +9784,11 @@ define void @test_vsuxseg6_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9930,12 +9804,12 @@ define void @test_vsuxseg7_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9948,12 +9822,12 @@ define void @test_vsuxseg7_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9969,12 +9843,12 @@ define void @test_vsuxseg7_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9987,12 +9861,12 @@ define void @test_vsuxseg7_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10008,12 +9882,12 @@ define void @test_vsuxseg7_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10026,12 +9900,12 @@ define void @test_vsuxseg7_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10047,13 +9921,13 @@ define void @test_vsuxseg8_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10066,13 +9940,13 @@ define void @test_vsuxseg8_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10088,13 +9962,13 @@ define void @test_vsuxseg8_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10107,13 +9981,13 @@ define void @test_vsuxseg8_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10129,13 +10003,13 @@ define void @test_vsuxseg8_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10148,13 +10022,13 @@ define void @test_vsuxseg8_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10169,7 +10043,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vsca
 define void @test_vsuxseg2_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -10183,7 +10056,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -10200,7 +10072,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vsc
 define void @test_vsuxseg2_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -10214,7 +10085,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -10231,7 +10101,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vsc
 define void @test_vsuxseg2_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -10245,7 +10114,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -10263,8 +10131,8 @@ define void @test_vsuxseg3_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10277,8 +10145,8 @@ define void @test_vsuxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10294,8 +10162,8 @@ define void @test_vsuxseg3_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10308,8 +10176,8 @@ define void @test_vsuxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10325,8 +10193,8 @@ define void @test_vsuxseg3_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10339,8 +10207,8 @@ define void @test_vsuxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10356,9 +10224,9 @@ define void @test_vsuxseg4_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10371,9 +10239,9 @@ define void @test_vsuxseg4_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10389,9 +10257,9 @@ define void @test_vsuxseg4_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10404,9 +10272,9 @@ define void @test_vsuxseg4_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10422,9 +10290,9 @@ define void @test_vsuxseg4_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10437,9 +10305,9 @@ define void @test_vsuxseg4_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10455,10 +10323,10 @@ define void @test_vsuxseg5_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10471,10 +10339,10 @@ define void @test_vsuxseg5_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10490,10 +10358,10 @@ define void @test_vsuxseg5_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10506,10 +10374,10 @@ define void @test_vsuxseg5_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10525,10 +10393,10 @@ define void @test_vsuxseg5_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10541,10 +10409,10 @@ define void @test_vsuxseg5_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10560,11 +10428,11 @@ define void @test_vsuxseg6_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10577,11 +10445,11 @@ define void @test_vsuxseg6_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10597,11 +10465,11 @@ define void @test_vsuxseg6_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10614,11 +10482,11 @@ define void @test_vsuxseg6_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10634,11 +10502,11 @@ define void @test_vsuxseg6_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10651,11 +10519,11 @@ define void @test_vsuxseg6_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10671,12 +10539,12 @@ define void @test_vsuxseg7_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10689,12 +10557,12 @@ define void @test_vsuxseg7_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10710,12 +10578,12 @@ define void @test_vsuxseg7_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10728,12 +10596,12 @@ define void @test_vsuxseg7_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10749,12 +10617,12 @@ define void @test_vsuxseg7_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10767,12 +10635,12 @@ define void @test_vsuxseg7_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10788,13 +10656,13 @@ define void @test_vsuxseg8_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10807,13 +10675,13 @@ define void @test_vsuxseg8_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10829,13 +10697,13 @@ define void @test_vsuxseg8_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10848,13 +10716,13 @@ define void @test_vsuxseg8_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10870,13 +10738,13 @@ define void @test_vsuxseg8_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10889,13 +10757,13 @@ define void @test_vsuxseg8_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10910,7 +10778,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vsca
 define void @test_vsuxseg2_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -10924,7 +10791,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -10941,7 +10807,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscal
 define void @test_vsuxseg2_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -10955,7 +10820,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -10972,7 +10836,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vsca
 define void @test_vsuxseg2_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
@@ -10985,7 +10848,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
@@ -11002,8 +10864,8 @@ define void @test_vsuxseg3_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11016,8 +10878,8 @@ define void @test_vsuxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11033,8 +10895,8 @@ define void @test_vsuxseg3_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11047,8 +10909,8 @@ define void @test_vsuxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11063,11 +10925,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vsca
 define void @test_vsuxseg3_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl)
@@ -11077,11 +10939,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl)
@@ -11095,9 +10957,9 @@ define void @test_vsuxseg4_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11110,9 +10972,9 @@ define void @test_vsuxseg4_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11128,9 +10990,9 @@ define void @test_vsuxseg4_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11143,9 +11005,9 @@ define void @test_vsuxseg4_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11161,9 +11023,9 @@ define void @test_vsuxseg4_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -11176,9 +11038,9 @@ define void @test_vsuxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -11193,7 +11055,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float>,<vsc
 define void @test_vsuxseg2_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -11207,7 +11068,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -11224,7 +11084,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float>,<vsca
 define void @test_vsuxseg2_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -11238,7 +11097,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -11255,7 +11113,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float>,<vsc
 define void @test_vsuxseg2_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -11269,7 +11126,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -11286,7 +11142,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vs
 define void @test_vsuxseg2_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11300,7 +11155,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11317,7 +11171,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vsc
 define void @test_vsuxseg2_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11331,7 +11184,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11348,7 +11200,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vs
 define void @test_vsuxseg2_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11362,7 +11213,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11380,8 +11230,8 @@ define void @test_vsuxseg3_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11394,8 +11244,8 @@ define void @test_vsuxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11411,8 +11261,8 @@ define void @test_vsuxseg3_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11425,8 +11275,8 @@ define void @test_vsuxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11442,8 +11292,8 @@ define void @test_vsuxseg3_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11456,8 +11306,8 @@ define void @test_vsuxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11473,9 +11323,9 @@ define void @test_vsuxseg4_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11488,9 +11338,9 @@ define void @test_vsuxseg4_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11506,9 +11356,9 @@ define void @test_vsuxseg4_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11521,9 +11371,9 @@ define void @test_vsuxseg4_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11539,9 +11389,9 @@ define void @test_vsuxseg4_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11554,9 +11404,9 @@ define void @test_vsuxseg4_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11571,7 +11421,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vsca
 define void @test_vsuxseg2_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -11585,7 +11434,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -11602,7 +11450,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscal
 define void @test_vsuxseg2_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -11616,7 +11463,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -11633,7 +11479,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vsca
 define void @test_vsuxseg2_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
@@ -11646,7 +11491,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
@@ -11663,8 +11507,8 @@ define void @test_vsuxseg3_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11677,8 +11521,8 @@ define void @test_vsuxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11694,8 +11538,8 @@ define void @test_vsuxseg3_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11708,8 +11552,8 @@ define void @test_vsuxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11724,11 +11568,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vsca
 define void @test_vsuxseg3_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl)
@@ -11738,11 +11582,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl)
@@ -11756,9 +11600,9 @@ define void @test_vsuxseg4_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11771,9 +11615,9 @@ define void @test_vsuxseg4_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11789,9 +11633,9 @@ define void @test_vsuxseg4_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11804,9 +11648,9 @@ define void @test_vsuxseg4_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11822,9 +11666,9 @@ define void @test_vsuxseg4_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11837,9 +11681,9 @@ define void @test_vsuxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11855,10 +11699,10 @@ define void @test_vsuxseg5_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11871,10 +11715,10 @@ define void @test_vsuxseg5_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11890,10 +11734,10 @@ define void @test_vsuxseg5_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11906,10 +11750,10 @@ define void @test_vsuxseg5_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11925,10 +11769,10 @@ define void @test_vsuxseg5_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11941,10 +11785,10 @@ define void @test_vsuxseg5_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11960,11 +11804,11 @@ define void @test_vsuxseg6_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11977,11 +11821,11 @@ define void @test_vsuxseg6_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11997,11 +11841,11 @@ define void @test_vsuxseg6_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12014,11 +11858,11 @@ define void @test_vsuxseg6_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12034,11 +11878,11 @@ define void @test_vsuxseg6_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -12051,11 +11895,11 @@ define void @test_vsuxseg6_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -12071,12 +11915,12 @@ define void @test_vsuxseg7_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12089,12 +11933,12 @@ define void @test_vsuxseg7_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12110,12 +11954,12 @@ define void @test_vsuxseg7_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12128,12 +11972,12 @@ define void @test_vsuxseg7_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12149,12 +11993,12 @@ define void @test_vsuxseg7_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -12167,12 +12011,12 @@ define void @test_vsuxseg7_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -12188,13 +12032,13 @@ define void @test_vsuxseg8_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12207,13 +12051,13 @@ define void @test_vsuxseg8_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12229,13 +12073,13 @@ define void @test_vsuxseg8_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12248,13 +12092,13 @@ define void @test_vsuxseg8_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12270,13 +12114,13 @@ define void @test_vsuxseg8_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -12289,13 +12133,13 @@ define void @test_vsuxseg8_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -12310,7 +12154,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vsca
 define void @test_vsuxseg2_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -12324,7 +12167,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -12341,7 +12183,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscal
 define void @test_vsuxseg2_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -12355,7 +12196,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -12372,7 +12212,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vsca
 define void @test_vsuxseg2_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -12386,7 +12225,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -12404,8 +12242,8 @@ define void @test_vsuxseg3_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12418,8 +12256,8 @@ define void @test_vsuxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12435,8 +12273,8 @@ define void @test_vsuxseg3_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12449,8 +12287,8 @@ define void @test_vsuxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12466,8 +12304,8 @@ define void @test_vsuxseg3_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12480,8 +12318,8 @@ define void @test_vsuxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12497,9 +12335,9 @@ define void @test_vsuxseg4_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12512,9 +12350,9 @@ define void @test_vsuxseg4_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12530,9 +12368,9 @@ define void @test_vsuxseg4_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12545,9 +12383,9 @@ define void @test_vsuxseg4_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12563,9 +12401,9 @@ define void @test_vsuxseg4_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12578,9 +12416,9 @@ define void @test_vsuxseg4_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12596,10 +12434,10 @@ define void @test_vsuxseg5_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12612,10 +12450,10 @@ define void @test_vsuxseg5_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12631,10 +12469,10 @@ define void @test_vsuxseg5_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12647,10 +12485,10 @@ define void @test_vsuxseg5_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12666,10 +12504,10 @@ define void @test_vsuxseg5_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12682,10 +12520,10 @@ define void @test_vsuxseg5_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12701,11 +12539,11 @@ define void @test_vsuxseg6_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12718,11 +12556,11 @@ define void @test_vsuxseg6_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12738,11 +12576,11 @@ define void @test_vsuxseg6_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12755,11 +12593,11 @@ define void @test_vsuxseg6_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12775,11 +12613,11 @@ define void @test_vsuxseg6_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12792,11 +12630,11 @@ define void @test_vsuxseg6_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12812,12 +12650,12 @@ define void @test_vsuxseg7_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12830,12 +12668,12 @@ define void @test_vsuxseg7_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12851,12 +12689,12 @@ define void @test_vsuxseg7_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12869,12 +12707,12 @@ define void @test_vsuxseg7_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12890,12 +12728,12 @@ define void @test_vsuxseg7_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12908,12 +12746,12 @@ define void @test_vsuxseg7_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12929,13 +12767,13 @@ define void @test_vsuxseg8_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12948,13 +12786,13 @@ define void @test_vsuxseg8_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12970,13 +12808,13 @@ define void @test_vsuxseg8_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12989,13 +12827,13 @@ define void @test_vsuxseg8_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13011,13 +12849,13 @@ define void @test_vsuxseg8_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13030,13 +12868,13 @@ define void @test_vsuxseg8_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13051,7 +12889,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vsc
 define void @test_vsuxseg2_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -13065,7 +12902,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -13082,7 +12918,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vsca
 define void @test_vsuxseg2_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -13096,7 +12931,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -13113,7 +12947,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vsc
 define void @test_vsuxseg2_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -13127,7 +12960,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i32 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -13145,8 +12977,8 @@ define void @test_vsuxseg3_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13159,8 +12991,8 @@ define void @test_vsuxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13176,8 +13008,8 @@ define void @test_vsuxseg3_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13190,8 +13022,8 @@ define void @test_vsuxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13207,8 +13039,8 @@ define void @test_vsuxseg3_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13221,8 +13053,8 @@ define void @test_vsuxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13238,9 +13070,9 @@ define void @test_vsuxseg4_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13253,9 +13085,9 @@ define void @test_vsuxseg4_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13271,9 +13103,9 @@ define void @test_vsuxseg4_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13286,9 +13118,9 @@ define void @test_vsuxseg4_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13304,9 +13136,9 @@ define void @test_vsuxseg4_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13319,9 +13151,9 @@ define void @test_vsuxseg4_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret

diff  --git a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll
index f4670bbcd3e9a..9cd5c28092333 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxseg-rv64.ll
@@ -8,7 +8,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vs
 define void @test_vsuxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -22,7 +21,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -39,7 +37,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i8(<vscale x 16 x i16>,<vsc
 define void @test_vsuxseg2_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -53,7 +50,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16i16_nxv16i8(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -70,7 +66,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i32(<vscale x 16 x i16>,<vs
 define void @test_vsuxseg2_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
@@ -83,7 +78,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16i16_nxv16i32(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
@@ -99,7 +93,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i32(<vscale x 4 x i32>,<vscal
 define void @test_vsuxseg2_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -113,7 +106,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -130,7 +122,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i8(<vscale x 4 x i32>,<vscale
 define void @test_vsuxseg2_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -144,7 +135,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -161,7 +151,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscal
 define void @test_vsuxseg2_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
@@ -174,7 +163,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
@@ -190,7 +178,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i32.nxv4i16(<vscale x 4 x i32>,<vscal
 define void @test_vsuxseg2_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -204,7 +191,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -222,8 +208,8 @@ define void @test_vsuxseg3_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -236,8 +222,8 @@ define void @test_vsuxseg3_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -253,8 +239,8 @@ define void @test_vsuxseg3_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -267,8 +253,8 @@ define void @test_vsuxseg3_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -283,11 +269,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32>,<vscal
 define void @test_vsuxseg3_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei64.v v16, (a0), v12
+; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
@@ -297,11 +283,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4i32.nxv4i64(<vscale x 4 x i32> %val,<vscale x 4 x i32> %val,<vscale x 4 x i32> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -315,8 +301,8 @@ define void @test_vsuxseg3_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -329,8 +315,8 @@ define void @test_vsuxseg3_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -346,9 +332,9 @@ define void @test_vsuxseg4_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -361,9 +347,9 @@ define void @test_vsuxseg4_mask_nxv4i32_nxv4i32(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -379,9 +365,9 @@ define void @test_vsuxseg4_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -394,9 +380,9 @@ define void @test_vsuxseg4_mask_nxv4i32_nxv4i8(<vscale x 4 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -412,9 +398,9 @@ define void @test_vsuxseg4_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -427,9 +413,9 @@ define void @test_vsuxseg4_mask_nxv4i32_nxv4i64(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -445,9 +431,9 @@ define void @test_vsuxseg4_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -460,9 +446,9 @@ define void @test_vsuxseg4_mask_nxv4i32_nxv4i16(<vscale x 4 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -477,7 +463,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vsca
 define void @test_vsuxseg2_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12
@@ -490,7 +475,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v12, v0.t
@@ -506,7 +490,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i8(<vscale x 16 x i8>,<vscal
 define void @test_vsuxseg2_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -520,7 +503,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -537,7 +519,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vsca
 define void @test_vsuxseg2_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
@@ -550,7 +531,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
@@ -566,11 +546,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8>,<vsca
 define void @test_vsuxseg3_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei16.v v16, (a0), v12
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl)
@@ -580,11 +560,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei16.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i16(<vscale x 16 x i8> %val,<vscale x 16 x i8> %val,<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl)
@@ -598,8 +578,8 @@ define void @test_vsuxseg3_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -612,8 +592,8 @@ define void @test_vsuxseg3_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -628,7 +608,6 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vsca
 define void @test_vsuxseg3_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -642,7 +621,6 @@ entry:
 define void @test_vsuxseg3_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
@@ -660,9 +638,9 @@ define void @test_vsuxseg4_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -675,9 +653,9 @@ define void @test_vsuxseg4_mask_nxv16i8_nxv16i16(<vscale x 16 x i8> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -693,9 +671,9 @@ define void @test_vsuxseg4_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -708,9 +686,9 @@ define void @test_vsuxseg4_mask_nxv16i8_nxv16i8(<vscale x 16 x i8> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -725,7 +703,6 @@ declare void @llvm.riscv.vsuxseg4.mask.nxv16i8.nxv16i32(<vscale x 16 x i8>,<vsca
 define void @test_vsuxseg4_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -740,7 +717,6 @@ entry:
 define void @test_vsuxseg4_mask_nxv16i8_nxv16i32(<vscale x 16 x i8> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv16i8_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -758,7 +734,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i64(<vscale x 1 x i64>,<vscal
 define void @test_vsuxseg2_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -772,7 +747,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -789,7 +763,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i32(<vscale x 1 x i64>,<vscal
 define void @test_vsuxseg2_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -803,7 +776,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -820,7 +792,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i16(<vscale x 1 x i64>,<vscal
 define void @test_vsuxseg2_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -834,7 +805,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -851,7 +821,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i64.nxv1i8(<vscale x 1 x i64>,<vscale
 define void @test_vsuxseg2_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -865,7 +834,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -883,8 +851,8 @@ define void @test_vsuxseg3_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -897,8 +865,8 @@ define void @test_vsuxseg3_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -914,8 +882,8 @@ define void @test_vsuxseg3_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -928,8 +896,8 @@ define void @test_vsuxseg3_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -945,8 +913,8 @@ define void @test_vsuxseg3_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -959,8 +927,8 @@ define void @test_vsuxseg3_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -976,8 +944,8 @@ define void @test_vsuxseg3_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -990,8 +958,8 @@ define void @test_vsuxseg3_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1007,9 +975,9 @@ define void @test_vsuxseg4_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1022,9 +990,9 @@ define void @test_vsuxseg4_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1040,9 +1008,9 @@ define void @test_vsuxseg4_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1055,9 +1023,9 @@ define void @test_vsuxseg4_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1073,9 +1041,9 @@ define void @test_vsuxseg4_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1088,9 +1056,9 @@ define void @test_vsuxseg4_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1106,9 +1074,9 @@ define void @test_vsuxseg4_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1121,9 +1089,9 @@ define void @test_vsuxseg4_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1139,10 +1107,10 @@ define void @test_vsuxseg5_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1155,10 +1123,10 @@ define void @test_vsuxseg5_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1174,10 +1142,10 @@ define void @test_vsuxseg5_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1190,10 +1158,10 @@ define void @test_vsuxseg5_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1209,10 +1177,10 @@ define void @test_vsuxseg5_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1225,10 +1193,10 @@ define void @test_vsuxseg5_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1244,10 +1212,10 @@ define void @test_vsuxseg5_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg5_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1260,10 +1228,10 @@ define void @test_vsuxseg5_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1279,11 +1247,11 @@ define void @test_vsuxseg6_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1296,11 +1264,11 @@ define void @test_vsuxseg6_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1316,11 +1284,11 @@ define void @test_vsuxseg6_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1333,11 +1301,11 @@ define void @test_vsuxseg6_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1353,11 +1321,11 @@ define void @test_vsuxseg6_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1370,11 +1338,11 @@ define void @test_vsuxseg6_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1390,11 +1358,11 @@ define void @test_vsuxseg6_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg6_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1407,11 +1375,11 @@ define void @test_vsuxseg6_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1427,12 +1395,12 @@ define void @test_vsuxseg7_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1445,12 +1413,12 @@ define void @test_vsuxseg7_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1466,12 +1434,12 @@ define void @test_vsuxseg7_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1484,12 +1452,12 @@ define void @test_vsuxseg7_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1505,12 +1473,12 @@ define void @test_vsuxseg7_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1523,12 +1491,12 @@ define void @test_vsuxseg7_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1544,15 +1512,15 @@ define void @test_vsuxseg7_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg7_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
-; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
-; CHECK-NEXT:    ret
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
+; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
+; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg7.nxv1i64.nxv1i8(<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val,<vscale x 1 x i64> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl)
   ret void
@@ -1562,12 +1530,12 @@ define void @test_vsuxseg7_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1583,13 +1551,13 @@ define void @test_vsuxseg8_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1602,13 +1570,13 @@ define void @test_vsuxseg8_mask_nxv1i64_nxv1i64(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1624,13 +1592,13 @@ define void @test_vsuxseg8_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1643,13 +1611,13 @@ define void @test_vsuxseg8_mask_nxv1i64_nxv1i32(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1665,13 +1633,13 @@ define void @test_vsuxseg8_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1684,13 +1652,13 @@ define void @test_vsuxseg8_mask_nxv1i64_nxv1i16(<vscale x 1 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1706,13 +1674,13 @@ define void @test_vsuxseg8_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg8_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1725,13 +1693,13 @@ define void @test_vsuxseg8_mask_nxv1i64_nxv1i8(<vscale x 1 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1746,7 +1714,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i64(<vscale x 1 x i32>,<vscal
 define void @test_vsuxseg2_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1760,7 +1727,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1777,7 +1743,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i32(<vscale x 1 x i32>,<vscal
 define void @test_vsuxseg2_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1791,7 +1756,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1808,7 +1772,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i16(<vscale x 1 x i32>,<vscal
 define void @test_vsuxseg2_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1822,7 +1785,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1839,7 +1801,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i32.nxv1i8(<vscale x 1 x i32>,<vscale
 define void @test_vsuxseg2_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1853,7 +1814,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -1871,8 +1831,8 @@ define void @test_vsuxseg3_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1885,8 +1845,8 @@ define void @test_vsuxseg3_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1902,8 +1862,8 @@ define void @test_vsuxseg3_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1916,8 +1876,8 @@ define void @test_vsuxseg3_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1933,8 +1893,8 @@ define void @test_vsuxseg3_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1947,8 +1907,8 @@ define void @test_vsuxseg3_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1964,8 +1924,8 @@ define void @test_vsuxseg3_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -1978,8 +1938,8 @@ define void @test_vsuxseg3_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -1995,9 +1955,9 @@ define void @test_vsuxseg4_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2010,9 +1970,9 @@ define void @test_vsuxseg4_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2028,9 +1988,9 @@ define void @test_vsuxseg4_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2043,9 +2003,9 @@ define void @test_vsuxseg4_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2061,9 +2021,9 @@ define void @test_vsuxseg4_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2076,9 +2036,9 @@ define void @test_vsuxseg4_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2094,9 +2054,9 @@ define void @test_vsuxseg4_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2109,9 +2069,9 @@ define void @test_vsuxseg4_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2127,10 +2087,10 @@ define void @test_vsuxseg5_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2143,10 +2103,10 @@ define void @test_vsuxseg5_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2162,10 +2122,10 @@ define void @test_vsuxseg5_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2178,10 +2138,10 @@ define void @test_vsuxseg5_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2197,10 +2157,10 @@ define void @test_vsuxseg5_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2213,10 +2173,10 @@ define void @test_vsuxseg5_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2232,10 +2192,10 @@ define void @test_vsuxseg5_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg5_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2248,10 +2208,10 @@ define void @test_vsuxseg5_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2267,11 +2227,11 @@ define void @test_vsuxseg6_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2284,11 +2244,11 @@ define void @test_vsuxseg6_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2304,11 +2264,11 @@ define void @test_vsuxseg6_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2321,11 +2281,11 @@ define void @test_vsuxseg6_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2341,11 +2301,11 @@ define void @test_vsuxseg6_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2358,11 +2318,11 @@ define void @test_vsuxseg6_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2378,11 +2338,11 @@ define void @test_vsuxseg6_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg6_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2395,11 +2355,11 @@ define void @test_vsuxseg6_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2415,12 +2375,12 @@ define void @test_vsuxseg7_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2433,12 +2393,12 @@ define void @test_vsuxseg7_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2454,12 +2414,12 @@ define void @test_vsuxseg7_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2472,12 +2432,12 @@ define void @test_vsuxseg7_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2493,12 +2453,12 @@ define void @test_vsuxseg7_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2511,16 +2471,16 @@ define void @test_vsuxseg7_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
-; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
+; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv1i32.nxv1i16(<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val,<vscale x 1 x i32> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl)
   ret void
 }
@@ -2532,12 +2492,12 @@ define void @test_vsuxseg7_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg7_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2550,12 +2510,12 @@ define void @test_vsuxseg7_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2571,13 +2531,13 @@ define void @test_vsuxseg8_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2590,13 +2550,13 @@ define void @test_vsuxseg8_mask_nxv1i32_nxv1i64(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2612,13 +2572,13 @@ define void @test_vsuxseg8_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2631,13 +2591,13 @@ define void @test_vsuxseg8_mask_nxv1i32_nxv1i32(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2653,13 +2613,13 @@ define void @test_vsuxseg8_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2672,13 +2632,13 @@ define void @test_vsuxseg8_mask_nxv1i32_nxv1i16(<vscale x 1 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2694,13 +2654,13 @@ define void @test_vsuxseg8_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg8_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -2713,13 +2673,13 @@ define void @test_vsuxseg8_mask_nxv1i32_nxv1i8(<vscale x 1 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -2734,7 +2694,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i16(<vscale x 8 x i16>,<vscal
 define void @test_vsuxseg2_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2748,7 +2707,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2765,7 +2723,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i8(<vscale x 8 x i16>,<vscale
 define void @test_vsuxseg2_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2779,7 +2736,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2796,7 +2752,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscal
 define void @test_vsuxseg2_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
@@ -2809,7 +2764,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
@@ -2825,7 +2779,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscal
 define void @test_vsuxseg2_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
@@ -2838,7 +2791,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
@@ -2855,8 +2807,8 @@ define void @test_vsuxseg3_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2869,8 +2821,8 @@ define void @test_vsuxseg3_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -2886,8 +2838,8 @@ define void @test_vsuxseg3_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2900,8 +2852,8 @@ define void @test_vsuxseg3_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -2916,7 +2868,6 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscal
 define void @test_vsuxseg3_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2930,7 +2881,6 @@ entry:
 define void @test_vsuxseg3_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -2947,11 +2897,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16>,<vscal
 define void @test_vsuxseg3_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
@@ -2961,11 +2911,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv8i16.nxv8i32(<vscale x 8 x i16> %val,<vscale x 8 x i16> %val,<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
@@ -2979,9 +2929,9 @@ define void @test_vsuxseg4_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -2994,9 +2944,9 @@ define void @test_vsuxseg4_mask_nxv8i16_nxv8i16(<vscale x 8 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3012,9 +2962,9 @@ define void @test_vsuxseg4_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3027,9 +2977,9 @@ define void @test_vsuxseg4_mask_nxv8i16_nxv8i8(<vscale x 8 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3044,7 +2994,6 @@ declare void @llvm.riscv.vsuxseg4.mask.nxv8i16.nxv8i64(<vscale x 8 x i16>,<vscal
 define void @test_vsuxseg4_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -3059,7 +3008,6 @@ entry:
 define void @test_vsuxseg4_mask_nxv8i16_nxv8i64(<vscale x 8 x i16> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -3078,9 +3026,9 @@ define void @test_vsuxseg4_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -3093,9 +3041,9 @@ define void @test_vsuxseg4_mask_nxv8i16_nxv8i32(<vscale x 8 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -3110,7 +3058,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale
 define void @test_vsuxseg2_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
@@ -3123,7 +3070,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
@@ -3139,7 +3085,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i8(<vscale x 4 x i8>,<vscale x
 define void @test_vsuxseg2_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3153,7 +3098,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3170,7 +3114,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale
 define void @test_vsuxseg2_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
@@ -3183,7 +3126,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
@@ -3199,7 +3141,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i8.nxv4i16(<vscale x 4 x i8>,<vscale
 define void @test_vsuxseg2_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3213,7 +3154,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3230,11 +3170,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8>,<vscale
 define void @test_vsuxseg3_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
@@ -3244,11 +3184,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i32(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -3262,8 +3202,8 @@ define void @test_vsuxseg3_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3276,8 +3216,8 @@ define void @test_vsuxseg3_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3292,7 +3232,6 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale
 define void @test_vsuxseg3_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3306,7 +3245,6 @@ entry:
 define void @test_vsuxseg3_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
@@ -3324,8 +3262,8 @@ define void @test_vsuxseg3_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg3_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3338,8 +3276,8 @@ define void @test_vsuxseg3_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3355,9 +3293,9 @@ define void @test_vsuxseg4_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3370,9 +3308,9 @@ define void @test_vsuxseg4_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3388,9 +3326,9 @@ define void @test_vsuxseg4_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3403,9 +3341,9 @@ define void @test_vsuxseg4_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3420,7 +3358,6 @@ declare void @llvm.riscv.vsuxseg4.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale
 define void @test_vsuxseg4_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3435,7 +3372,6 @@ entry:
 define void @test_vsuxseg4_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -3454,9 +3390,9 @@ define void @test_vsuxseg4_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3469,9 +3405,9 @@ define void @test_vsuxseg4_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3487,10 +3423,10 @@ define void @test_vsuxseg5_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3503,10 +3439,10 @@ define void @test_vsuxseg5_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3522,10 +3458,10 @@ define void @test_vsuxseg5_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3538,10 +3474,10 @@ define void @test_vsuxseg5_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3556,13 +3492,13 @@ declare void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8>,<vscale
 define void @test_vsuxseg5_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsuxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
@@ -3572,13 +3508,13 @@ entry:
 define void @test_vsuxseg5_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
-; CHECK-NEXT:    vsuxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv4i8.nxv4i64(<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val,<vscale x 4 x i8> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -3592,10 +3528,10 @@ define void @test_vsuxseg5_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3608,10 +3544,10 @@ define void @test_vsuxseg5_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3627,11 +3563,11 @@ define void @test_vsuxseg6_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3644,11 +3580,11 @@ define void @test_vsuxseg6_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3664,11 +3600,11 @@ define void @test_vsuxseg6_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3681,11 +3617,11 @@ define void @test_vsuxseg6_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3701,11 +3637,11 @@ define void @test_vsuxseg6_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -3718,11 +3654,11 @@ define void @test_vsuxseg6_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -3738,11 +3674,11 @@ define void @test_vsuxseg6_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3755,11 +3691,11 @@ define void @test_vsuxseg6_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3775,12 +3711,12 @@ define void @test_vsuxseg7_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3793,12 +3729,12 @@ define void @test_vsuxseg7_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3814,12 +3750,12 @@ define void @test_vsuxseg7_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3832,12 +3768,12 @@ define void @test_vsuxseg7_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3853,12 +3789,12 @@ define void @test_vsuxseg7_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -3871,12 +3807,12 @@ define void @test_vsuxseg7_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -3892,12 +3828,12 @@ define void @test_vsuxseg7_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3910,12 +3846,12 @@ define void @test_vsuxseg7_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -3931,13 +3867,13 @@ define void @test_vsuxseg8_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -3950,13 +3886,13 @@ define void @test_vsuxseg8_mask_nxv4i8_nxv4i32(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -3972,13 +3908,13 @@ define void @test_vsuxseg8_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -3991,13 +3927,13 @@ define void @test_vsuxseg8_mask_nxv4i8_nxv4i8(<vscale x 4 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4013,13 +3949,13 @@ define void @test_vsuxseg8_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -4032,13 +3968,13 @@ define void @test_vsuxseg8_mask_nxv4i8_nxv4i64(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -4054,13 +3990,13 @@ define void @test_vsuxseg8_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4073,13 +4009,13 @@ define void @test_vsuxseg8_mask_nxv4i8_nxv4i16(<vscale x 4 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i8_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4094,7 +4030,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i64(<vscale x 1 x i16>,<vscal
 define void @test_vsuxseg2_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4108,7 +4043,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4125,7 +4059,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i32(<vscale x 1 x i16>,<vscal
 define void @test_vsuxseg2_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4139,7 +4072,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4156,7 +4088,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i16(<vscale x 1 x i16>,<vscal
 define void @test_vsuxseg2_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4170,7 +4101,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4187,7 +4117,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i16.nxv1i8(<vscale x 1 x i16>,<vscale
 define void @test_vsuxseg2_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4201,7 +4130,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -4219,8 +4147,8 @@ define void @test_vsuxseg3_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4233,8 +4161,8 @@ define void @test_vsuxseg3_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4250,8 +4178,8 @@ define void @test_vsuxseg3_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4264,8 +4192,8 @@ define void @test_vsuxseg3_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4281,8 +4209,8 @@ define void @test_vsuxseg3_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4295,8 +4223,8 @@ define void @test_vsuxseg3_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4312,8 +4240,8 @@ define void @test_vsuxseg3_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4326,8 +4254,8 @@ define void @test_vsuxseg3_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4343,9 +4271,9 @@ define void @test_vsuxseg4_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4358,9 +4286,9 @@ define void @test_vsuxseg4_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4376,9 +4304,9 @@ define void @test_vsuxseg4_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4391,9 +4319,9 @@ define void @test_vsuxseg4_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4409,9 +4337,9 @@ define void @test_vsuxseg4_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4424,9 +4352,9 @@ define void @test_vsuxseg4_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4442,9 +4370,9 @@ define void @test_vsuxseg4_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4457,9 +4385,9 @@ define void @test_vsuxseg4_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4475,10 +4403,10 @@ define void @test_vsuxseg5_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4491,10 +4419,10 @@ define void @test_vsuxseg5_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4510,10 +4438,10 @@ define void @test_vsuxseg5_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4526,10 +4454,10 @@ define void @test_vsuxseg5_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4545,10 +4473,10 @@ define void @test_vsuxseg5_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4561,10 +4489,10 @@ define void @test_vsuxseg5_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4580,10 +4508,10 @@ define void @test_vsuxseg5_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg5_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4596,10 +4524,10 @@ define void @test_vsuxseg5_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4615,11 +4543,11 @@ define void @test_vsuxseg6_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4632,11 +4560,11 @@ define void @test_vsuxseg6_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4652,11 +4580,11 @@ define void @test_vsuxseg6_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4669,11 +4597,11 @@ define void @test_vsuxseg6_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4689,11 +4617,11 @@ define void @test_vsuxseg6_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4706,11 +4634,11 @@ define void @test_vsuxseg6_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4726,11 +4654,11 @@ define void @test_vsuxseg6_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg6_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4743,11 +4671,11 @@ define void @test_vsuxseg6_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4763,12 +4691,12 @@ define void @test_vsuxseg7_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4781,12 +4709,12 @@ define void @test_vsuxseg7_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4802,12 +4730,12 @@ define void @test_vsuxseg7_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4820,12 +4748,12 @@ define void @test_vsuxseg7_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4841,12 +4769,12 @@ define void @test_vsuxseg7_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4859,12 +4787,12 @@ define void @test_vsuxseg7_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4880,12 +4808,12 @@ define void @test_vsuxseg7_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg7_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4898,16 +4826,16 @@ define void @test_vsuxseg7_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
-; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
+; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
+; CHECK-NEXT:    ret
+entry:
   tail call void @llvm.riscv.vsuxseg7.mask.nxv1i16.nxv1i8(<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val,<vscale x 1 x i16> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl)
   ret void
 }
@@ -4919,13 +4847,13 @@ define void @test_vsuxseg8_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4938,13 +4866,13 @@ define void @test_vsuxseg8_mask_nxv1i16_nxv1i64(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -4960,13 +4888,13 @@ define void @test_vsuxseg8_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -4979,13 +4907,13 @@ define void @test_vsuxseg8_mask_nxv1i16_nxv1i32(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5001,13 +4929,13 @@ define void @test_vsuxseg8_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5020,13 +4948,13 @@ define void @test_vsuxseg8_mask_nxv1i16_nxv1i16(<vscale x 1 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5042,13 +4970,13 @@ define void @test_vsuxseg8_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg8_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5061,13 +4989,13 @@ define void @test_vsuxseg8_mask_nxv1i16_nxv1i8(<vscale x 1 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5082,7 +5010,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i32(<vscale x 2 x i32>,<vscal
 define void @test_vsuxseg2_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -5096,7 +5023,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -5113,7 +5039,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i8(<vscale x 2 x i32>,<vscale
 define void @test_vsuxseg2_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -5127,7 +5052,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -5144,7 +5068,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i16(<vscale x 2 x i32>,<vscal
 define void @test_vsuxseg2_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -5158,7 +5081,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -5175,7 +5097,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscal
 define void @test_vsuxseg2_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
@@ -5188,7 +5109,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
@@ -5205,8 +5125,8 @@ define void @test_vsuxseg3_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5219,8 +5139,8 @@ define void @test_vsuxseg3_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5236,8 +5156,8 @@ define void @test_vsuxseg3_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5250,8 +5170,8 @@ define void @test_vsuxseg3_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5267,8 +5187,8 @@ define void @test_vsuxseg3_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5281,8 +5201,8 @@ define void @test_vsuxseg3_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5297,11 +5217,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32>,<vscal
 define void @test_vsuxseg3_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -5311,11 +5231,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2i32.nxv2i64(<vscale x 2 x i32> %val,<vscale x 2 x i32> %val,<vscale x 2 x i32> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -5329,9 +5249,9 @@ define void @test_vsuxseg4_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5344,9 +5264,9 @@ define void @test_vsuxseg4_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5362,9 +5282,9 @@ define void @test_vsuxseg4_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5377,9 +5297,9 @@ define void @test_vsuxseg4_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5395,9 +5315,9 @@ define void @test_vsuxseg4_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5410,9 +5330,9 @@ define void @test_vsuxseg4_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5428,9 +5348,9 @@ define void @test_vsuxseg4_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -5443,9 +5363,9 @@ define void @test_vsuxseg4_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -5461,10 +5381,10 @@ define void @test_vsuxseg5_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5477,10 +5397,10 @@ define void @test_vsuxseg5_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5496,10 +5416,10 @@ define void @test_vsuxseg5_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5512,10 +5432,10 @@ define void @test_vsuxseg5_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5531,10 +5451,10 @@ define void @test_vsuxseg5_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5547,10 +5467,10 @@ define void @test_vsuxseg5_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5566,10 +5486,10 @@ define void @test_vsuxseg5_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -5582,10 +5502,10 @@ define void @test_vsuxseg5_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -5601,11 +5521,11 @@ define void @test_vsuxseg6_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5618,11 +5538,11 @@ define void @test_vsuxseg6_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5638,11 +5558,11 @@ define void @test_vsuxseg6_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5655,11 +5575,11 @@ define void @test_vsuxseg6_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5675,11 +5595,11 @@ define void @test_vsuxseg6_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5692,11 +5612,11 @@ define void @test_vsuxseg6_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5712,11 +5632,11 @@ define void @test_vsuxseg6_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -5729,11 +5649,11 @@ define void @test_vsuxseg6_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -5749,12 +5669,12 @@ define void @test_vsuxseg7_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5767,12 +5687,12 @@ define void @test_vsuxseg7_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5788,12 +5708,12 @@ define void @test_vsuxseg7_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5806,12 +5726,12 @@ define void @test_vsuxseg7_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5827,12 +5747,12 @@ define void @test_vsuxseg7_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5845,12 +5765,12 @@ define void @test_vsuxseg7_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5866,12 +5786,12 @@ define void @test_vsuxseg7_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -5884,12 +5804,12 @@ define void @test_vsuxseg7_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -5905,13 +5825,13 @@ define void @test_vsuxseg8_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5924,13 +5844,13 @@ define void @test_vsuxseg8_mask_nxv2i32_nxv2i32(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5946,13 +5866,13 @@ define void @test_vsuxseg8_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -5965,13 +5885,13 @@ define void @test_vsuxseg8_mask_nxv2i32_nxv2i8(<vscale x 2 x i32> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -5987,13 +5907,13 @@ define void @test_vsuxseg8_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6006,13 +5926,13 @@ define void @test_vsuxseg8_mask_nxv2i32_nxv2i16(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6028,13 +5948,13 @@ define void @test_vsuxseg8_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -6047,13 +5967,13 @@ define void @test_vsuxseg8_mask_nxv2i32_nxv2i64(<vscale x 2 x i32> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -6068,7 +5988,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg2_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10
@@ -6081,7 +6000,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v10, v0.t
@@ -6097,7 +6015,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i8(<vscale x 8 x i8>,<vscale x
 define void @test_vsuxseg2_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -6111,7 +6028,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -6128,7 +6044,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg2_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
@@ -6141,7 +6056,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
@@ -6157,7 +6071,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg2_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
@@ -6170,7 +6083,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
@@ -6186,11 +6098,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg3_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl)
@@ -6200,11 +6112,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsuxseg3ei16.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i16(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl)
@@ -6218,8 +6130,8 @@ define void @test_vsuxseg3_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6232,8 +6144,8 @@ define void @test_vsuxseg3_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6248,7 +6160,6 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg3_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -6262,7 +6173,6 @@ entry:
 define void @test_vsuxseg3_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -6279,7 +6189,6 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg3_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -6293,7 +6202,6 @@ entry:
 define void @test_vsuxseg3_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
@@ -6311,9 +6219,9 @@ define void @test_vsuxseg4_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -6326,9 +6234,9 @@ define void @test_vsuxseg4_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -6344,9 +6252,9 @@ define void @test_vsuxseg4_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6359,9 +6267,9 @@ define void @test_vsuxseg4_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6376,7 +6284,6 @@ declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg4_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6391,7 +6298,6 @@ entry:
 define void @test_vsuxseg4_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6409,7 +6315,6 @@ declare void @llvm.riscv.vsuxseg4.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg4_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6424,7 +6329,6 @@ entry:
 define void @test_vsuxseg4_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6443,10 +6347,10 @@ define void @test_vsuxseg5_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -6459,10 +6363,10 @@ define void @test_vsuxseg5_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -6478,10 +6382,10 @@ define void @test_vsuxseg5_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6494,10 +6398,10 @@ define void @test_vsuxseg5_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6512,7 +6416,6 @@ declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg5_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6528,7 +6431,6 @@ entry:
 define void @test_vsuxseg5_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6547,13 +6449,13 @@ declare void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg5_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsuxseg5ei32.v v16, (a0), v12
+; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
@@ -6563,13 +6465,13 @@ entry:
 define void @test_vsuxseg5_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
-; CHECK-NEXT:    vsuxseg5ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsuxseg5ei32.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv8i8.nxv8i32(<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val,<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
@@ -6583,11 +6485,11 @@ define void @test_vsuxseg6_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -6600,11 +6502,11 @@ define void @test_vsuxseg6_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -6620,11 +6522,11 @@ define void @test_vsuxseg6_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6637,11 +6539,11 @@ define void @test_vsuxseg6_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6656,7 +6558,6 @@ declare void @llvm.riscv.vsuxseg6.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg6_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6673,7 +6574,6 @@ entry:
 define void @test_vsuxseg6_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6694,11 +6594,11 @@ define void @test_vsuxseg6_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -6711,11 +6611,11 @@ define void @test_vsuxseg6_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -6731,12 +6631,12 @@ define void @test_vsuxseg7_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -6749,12 +6649,12 @@ define void @test_vsuxseg7_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -6770,12 +6670,12 @@ define void @test_vsuxseg7_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6788,12 +6688,12 @@ define void @test_vsuxseg7_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6808,7 +6708,6 @@ declare void @llvm.riscv.vsuxseg7.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg7_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6826,7 +6725,6 @@ entry:
 define void @test_vsuxseg7_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6848,12 +6746,12 @@ define void @test_vsuxseg7_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -6866,12 +6764,12 @@ define void @test_vsuxseg7_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -6887,13 +6785,13 @@ define void @test_vsuxseg8_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -6906,13 +6804,13 @@ define void @test_vsuxseg8_mask_nxv8i8_nxv8i16(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -6928,13 +6826,13 @@ define void @test_vsuxseg8_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -6947,13 +6845,13 @@ define void @test_vsuxseg8_mask_nxv8i8_nxv8i8(<vscale x 8 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -6968,7 +6866,6 @@ declare void @llvm.riscv.vsuxseg8.mask.nxv8i8.nxv8i64(<vscale x 8 x i8>,<vscale
 define void @test_vsuxseg8_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -6987,7 +6884,6 @@ entry:
 define void @test_vsuxseg8_mask_nxv8i8_nxv8i64(<vscale x 8 x i8> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11_v12_v13_v14_v15
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -7010,13 +6906,13 @@ define void @test_vsuxseg8_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -7029,13 +6925,13 @@ define void @test_vsuxseg8_mask_nxv8i8_nxv8i32(<vscale x 8 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv8i8_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -7050,7 +6946,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i32(<vscale x 4 x i64>,<vscal
 define void @test_vsuxseg2_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7064,7 +6959,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i64_nxv4i32(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7081,7 +6975,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i8(<vscale x 4 x i64>,<vscale
 define void @test_vsuxseg2_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7095,7 +6988,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i64_nxv4i8(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7112,7 +7004,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i64(<vscale x 4 x i64>,<vscal
 define void @test_vsuxseg2_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7126,7 +7017,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i64_nxv4i64(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7143,7 +7033,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i64.nxv4i16(<vscale x 4 x i64>,<vscal
 define void @test_vsuxseg2_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7157,7 +7046,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i64_nxv4i16(<vscale x 4 x i64> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -7174,7 +7062,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscal
 define void @test_vsuxseg2_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
@@ -7187,7 +7074,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
@@ -7203,7 +7089,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i8(<vscale x 4 x i16>,<vscale
 define void @test_vsuxseg2_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -7217,7 +7102,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -7234,7 +7118,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscal
 define void @test_vsuxseg2_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
@@ -7247,7 +7130,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
@@ -7263,7 +7145,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4i16.nxv4i16(<vscale x 4 x i16>,<vscal
 define void @test_vsuxseg2_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -7277,7 +7158,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -7294,11 +7174,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16>,<vscal
 define void @test_vsuxseg3_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
@@ -7308,11 +7188,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i32(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -7326,8 +7206,8 @@ define void @test_vsuxseg3_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7340,8 +7220,8 @@ define void @test_vsuxseg3_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7356,7 +7236,6 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscal
 define void @test_vsuxseg3_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -7370,7 +7249,6 @@ entry:
 define void @test_vsuxseg3_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -7388,8 +7266,8 @@ define void @test_vsuxseg3_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7402,8 +7280,8 @@ define void @test_vsuxseg3_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7419,9 +7297,9 @@ define void @test_vsuxseg4_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7434,9 +7312,9 @@ define void @test_vsuxseg4_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7452,9 +7330,9 @@ define void @test_vsuxseg4_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7467,9 +7345,9 @@ define void @test_vsuxseg4_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7484,7 +7362,6 @@ declare void @llvm.riscv.vsuxseg4.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscal
 define void @test_vsuxseg4_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -7499,7 +7376,6 @@ entry:
 define void @test_vsuxseg4_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -7518,9 +7394,9 @@ define void @test_vsuxseg4_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7533,9 +7409,9 @@ define void @test_vsuxseg4_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7551,10 +7427,10 @@ define void @test_vsuxseg5_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7567,10 +7443,10 @@ define void @test_vsuxseg5_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7586,10 +7462,10 @@ define void @test_vsuxseg5_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7602,10 +7478,10 @@ define void @test_vsuxseg5_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7620,13 +7496,13 @@ declare void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16>,<vscal
 define void @test_vsuxseg5_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsuxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
@@ -7636,13 +7512,13 @@ entry:
 define void @test_vsuxseg5_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsuxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv4i16.nxv4i64(<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val,<vscale x 4 x i16> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -7656,10 +7532,10 @@ define void @test_vsuxseg5_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7672,10 +7548,10 @@ define void @test_vsuxseg5_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7691,11 +7567,11 @@ define void @test_vsuxseg6_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7708,11 +7584,11 @@ define void @test_vsuxseg6_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7728,11 +7604,11 @@ define void @test_vsuxseg6_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7745,11 +7621,11 @@ define void @test_vsuxseg6_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7765,11 +7641,11 @@ define void @test_vsuxseg6_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -7782,11 +7658,11 @@ define void @test_vsuxseg6_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -7802,11 +7678,11 @@ define void @test_vsuxseg6_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7819,11 +7695,11 @@ define void @test_vsuxseg6_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7839,12 +7715,12 @@ define void @test_vsuxseg7_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -7857,12 +7733,12 @@ define void @test_vsuxseg7_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -7878,12 +7754,12 @@ define void @test_vsuxseg7_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7896,12 +7772,12 @@ define void @test_vsuxseg7_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7917,12 +7793,12 @@ define void @test_vsuxseg7_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -7935,12 +7811,12 @@ define void @test_vsuxseg7_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -7956,12 +7832,12 @@ define void @test_vsuxseg7_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -7974,12 +7850,12 @@ define void @test_vsuxseg7_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -7995,13 +7871,13 @@ define void @test_vsuxseg8_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -8014,13 +7890,13 @@ define void @test_vsuxseg8_mask_nxv4i16_nxv4i32(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -8036,13 +7912,13 @@ define void @test_vsuxseg8_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8055,13 +7931,13 @@ define void @test_vsuxseg8_mask_nxv4i16_nxv4i8(<vscale x 4 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8077,13 +7953,13 @@ define void @test_vsuxseg8_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -8096,13 +7972,13 @@ define void @test_vsuxseg8_mask_nxv4i16_nxv4i64(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -8118,13 +7994,13 @@ define void @test_vsuxseg8_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8137,13 +8013,13 @@ define void @test_vsuxseg8_mask_nxv4i16_nxv4i16(<vscale x 4 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4i16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8158,7 +8034,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i64(<vscale x 1 x i8>,<vscale
 define void @test_vsuxseg2_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8172,7 +8047,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8189,7 +8063,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i32(<vscale x 1 x i8>,<vscale
 define void @test_vsuxseg2_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8203,7 +8076,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8220,7 +8092,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i16(<vscale x 1 x i8>,<vscale
 define void @test_vsuxseg2_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8234,7 +8105,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8251,7 +8121,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1i8.nxv1i8(<vscale x 1 x i8>,<vscale x
 define void @test_vsuxseg2_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8265,7 +8134,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
@@ -8283,8 +8151,8 @@ define void @test_vsuxseg3_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8297,8 +8165,8 @@ define void @test_vsuxseg3_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8314,8 +8182,8 @@ define void @test_vsuxseg3_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8328,8 +8196,8 @@ define void @test_vsuxseg3_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8345,8 +8213,8 @@ define void @test_vsuxseg3_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8359,8 +8227,8 @@ define void @test_vsuxseg3_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8376,8 +8244,8 @@ define void @test_vsuxseg3_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg3_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8390,8 +8258,8 @@ define void @test_vsuxseg3_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8407,9 +8275,9 @@ define void @test_vsuxseg4_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8422,9 +8290,9 @@ define void @test_vsuxseg4_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8440,9 +8308,9 @@ define void @test_vsuxseg4_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8455,9 +8323,9 @@ define void @test_vsuxseg4_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8473,9 +8341,9 @@ define void @test_vsuxseg4_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8488,9 +8356,9 @@ define void @test_vsuxseg4_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8506,9 +8374,9 @@ define void @test_vsuxseg4_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg4_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8521,9 +8389,9 @@ define void @test_vsuxseg4_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8539,10 +8407,10 @@ define void @test_vsuxseg5_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8555,10 +8423,10 @@ define void @test_vsuxseg5_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8574,10 +8442,10 @@ define void @test_vsuxseg5_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8590,10 +8458,10 @@ define void @test_vsuxseg5_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8609,10 +8477,10 @@ define void @test_vsuxseg5_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8625,10 +8493,10 @@ define void @test_vsuxseg5_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8644,10 +8512,10 @@ define void @test_vsuxseg5_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg5_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8660,10 +8528,10 @@ define void @test_vsuxseg5_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8679,11 +8547,11 @@ define void @test_vsuxseg6_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8696,11 +8564,11 @@ define void @test_vsuxseg6_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8716,11 +8584,11 @@ define void @test_vsuxseg6_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8733,11 +8601,11 @@ define void @test_vsuxseg6_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8753,11 +8621,11 @@ define void @test_vsuxseg6_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8770,11 +8638,11 @@ define void @test_vsuxseg6_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8790,11 +8658,11 @@ define void @test_vsuxseg6_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg6_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8807,11 +8675,11 @@ define void @test_vsuxseg6_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8827,12 +8695,12 @@ define void @test_vsuxseg7_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8845,12 +8713,12 @@ define void @test_vsuxseg7_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8866,12 +8734,12 @@ define void @test_vsuxseg7_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8884,12 +8752,12 @@ define void @test_vsuxseg7_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8905,12 +8773,12 @@ define void @test_vsuxseg7_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8923,12 +8791,12 @@ define void @test_vsuxseg7_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8944,12 +8812,12 @@ define void @test_vsuxseg7_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg7_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -8962,12 +8830,12 @@ define void @test_vsuxseg7_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -8983,13 +8851,13 @@ define void @test_vsuxseg8_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9002,13 +8870,13 @@ define void @test_vsuxseg8_mask_nxv1i8_nxv1i64(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9024,13 +8892,13 @@ define void @test_vsuxseg8_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9043,13 +8911,13 @@ define void @test_vsuxseg8_mask_nxv1i8_nxv1i32(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9065,13 +8933,13 @@ define void @test_vsuxseg8_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9084,13 +8952,13 @@ define void @test_vsuxseg8_mask_nxv1i8_nxv1i16(<vscale x 1 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9106,13 +8974,13 @@ define void @test_vsuxseg8_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg8_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9125,13 +8993,13 @@ define void @test_vsuxseg8_mask_nxv1i8_nxv1i8(<vscale x 1 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1i8_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9146,7 +9014,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i32(<vscale x 2 x i8>,<vscale
 define void @test_vsuxseg2_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -9160,7 +9027,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -9177,7 +9043,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i8(<vscale x 2 x i8>,<vscale x
 define void @test_vsuxseg2_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -9191,7 +9056,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -9208,7 +9072,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i16(<vscale x 2 x i8>,<vscale
 define void @test_vsuxseg2_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -9222,7 +9085,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
@@ -9239,7 +9101,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale
 define void @test_vsuxseg2_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
@@ -9252,7 +9113,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
@@ -9269,8 +9129,8 @@ define void @test_vsuxseg3_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9283,8 +9143,8 @@ define void @test_vsuxseg3_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9300,8 +9160,8 @@ define void @test_vsuxseg3_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9314,8 +9174,8 @@ define void @test_vsuxseg3_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9331,8 +9191,8 @@ define void @test_vsuxseg3_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9345,8 +9205,8 @@ define void @test_vsuxseg3_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9361,11 +9221,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8>,<vscale
 define void @test_vsuxseg3_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -9375,11 +9235,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
-; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2i8.nxv2i64(<vscale x 2 x i8> %val,<vscale x 2 x i8> %val,<vscale x 2 x i8> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -9393,9 +9253,9 @@ define void @test_vsuxseg4_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9408,9 +9268,9 @@ define void @test_vsuxseg4_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9426,9 +9286,9 @@ define void @test_vsuxseg4_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9441,9 +9301,9 @@ define void @test_vsuxseg4_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9459,9 +9319,9 @@ define void @test_vsuxseg4_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9474,9 +9334,9 @@ define void @test_vsuxseg4_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9492,9 +9352,9 @@ define void @test_vsuxseg4_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg4_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -9507,9 +9367,9 @@ define void @test_vsuxseg4_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -9525,10 +9385,10 @@ define void @test_vsuxseg5_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9541,10 +9401,10 @@ define void @test_vsuxseg5_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9560,10 +9420,10 @@ define void @test_vsuxseg5_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9576,10 +9436,10 @@ define void @test_vsuxseg5_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9595,10 +9455,10 @@ define void @test_vsuxseg5_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9611,10 +9471,10 @@ define void @test_vsuxseg5_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9630,10 +9490,10 @@ define void @test_vsuxseg5_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg5_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -9646,10 +9506,10 @@ define void @test_vsuxseg5_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -9665,11 +9525,11 @@ define void @test_vsuxseg6_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9682,11 +9542,11 @@ define void @test_vsuxseg6_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9702,11 +9562,11 @@ define void @test_vsuxseg6_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9719,11 +9579,11 @@ define void @test_vsuxseg6_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9739,11 +9599,11 @@ define void @test_vsuxseg6_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9756,11 +9616,11 @@ define void @test_vsuxseg6_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9776,11 +9636,11 @@ define void @test_vsuxseg6_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg6_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -9793,11 +9653,11 @@ define void @test_vsuxseg6_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -9813,12 +9673,12 @@ define void @test_vsuxseg7_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9831,12 +9691,12 @@ define void @test_vsuxseg7_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9852,12 +9712,12 @@ define void @test_vsuxseg7_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9870,12 +9730,12 @@ define void @test_vsuxseg7_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9891,12 +9751,12 @@ define void @test_vsuxseg7_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9909,12 +9769,12 @@ define void @test_vsuxseg7_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -9930,12 +9790,12 @@ define void @test_vsuxseg7_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg7_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -9948,12 +9808,12 @@ define void @test_vsuxseg7_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -9969,13 +9829,13 @@ define void @test_vsuxseg8_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -9988,13 +9848,13 @@ define void @test_vsuxseg8_mask_nxv2i8_nxv2i32(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10010,13 +9870,13 @@ define void @test_vsuxseg8_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base, <vsc
 ; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10029,13 +9889,13 @@ define void @test_vsuxseg8_mask_nxv2i8_nxv2i8(<vscale x 2 x i8> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10051,13 +9911,13 @@ define void @test_vsuxseg8_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10070,13 +9930,13 @@ define void @test_vsuxseg8_mask_nxv2i8_nxv2i16(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10092,13 +9952,13 @@ define void @test_vsuxseg8_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base, <vs
 ; CHECK-LABEL: test_vsuxseg8_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -10111,13 +9971,13 @@ define void @test_vsuxseg8_mask_nxv2i8_nxv2i64(<vscale x 2 x i8> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i8_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -10132,7 +9992,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i16(<vscale x 8 x i32>,<vscal
 define void @test_vsuxseg2_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -10146,7 +10005,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i32_nxv8i16(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -10163,7 +10021,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i8(<vscale x 8 x i32>,<vscale
 define void @test_vsuxseg2_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -10177,7 +10034,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i32_nxv8i8(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -10194,7 +10050,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i64(<vscale x 8 x i32>,<vscal
 define void @test_vsuxseg2_nxv8i32_nxv8i64(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
@@ -10207,7 +10062,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i32_nxv8i64(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
@@ -10223,7 +10077,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8i32.nxv8i32(<vscale x 8 x i32>,<vscal
 define void @test_vsuxseg2_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -10237,7 +10090,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8i32_nxv8i32(<vscale x 8 x i32> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8i32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -10254,7 +10106,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i16(<vscale x 32 x i8>,<vsca
 define void @test_vsuxseg2_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16
@@ -10267,7 +10118,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv32i8_nxv32i16(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i16> %index, <vscale x 32 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei16.v v8, (a0), v16, v0.t
@@ -10283,7 +10133,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv32i8.nxv32i8(<vscale x 32 x i8>,<vscal
 define void @test_vsuxseg2_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -10297,7 +10146,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv32i8_nxv32i8(<vscale x 32 x i8> %val, ptr %base, <vscale x 32 x i8> %index, <vscale x 32 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv32i8_nxv32i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, ma
@@ -10314,7 +10162,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i32(<vscale x 2 x i16>,<vscal
 define void @test_vsuxseg2_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -10328,7 +10175,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -10345,7 +10191,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i8(<vscale x 2 x i16>,<vscale
 define void @test_vsuxseg2_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -10359,7 +10204,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -10376,7 +10220,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i16(<vscale x 2 x i16>,<vscal
 define void @test_vsuxseg2_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -10390,7 +10233,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -10407,7 +10249,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscal
 define void @test_vsuxseg2_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
@@ -10420,7 +10261,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
@@ -10437,8 +10277,8 @@ define void @test_vsuxseg3_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10451,8 +10291,8 @@ define void @test_vsuxseg3_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10468,8 +10308,8 @@ define void @test_vsuxseg3_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10482,8 +10322,8 @@ define void @test_vsuxseg3_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10499,8 +10339,8 @@ define void @test_vsuxseg3_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10513,8 +10353,8 @@ define void @test_vsuxseg3_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10529,11 +10369,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16>,<vscal
 define void @test_vsuxseg3_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -10543,11 +10383,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2i16.nxv2i64(<vscale x 2 x i16> %val,<vscale x 2 x i16> %val,<vscale x 2 x i16> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -10561,9 +10401,9 @@ define void @test_vsuxseg4_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10576,9 +10416,9 @@ define void @test_vsuxseg4_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10594,9 +10434,9 @@ define void @test_vsuxseg4_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10609,9 +10449,9 @@ define void @test_vsuxseg4_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10627,9 +10467,9 @@ define void @test_vsuxseg4_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10642,9 +10482,9 @@ define void @test_vsuxseg4_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10660,9 +10500,9 @@ define void @test_vsuxseg4_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -10675,9 +10515,9 @@ define void @test_vsuxseg4_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -10693,10 +10533,10 @@ define void @test_vsuxseg5_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10709,10 +10549,10 @@ define void @test_vsuxseg5_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10728,10 +10568,10 @@ define void @test_vsuxseg5_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10744,10 +10584,10 @@ define void @test_vsuxseg5_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10763,10 +10603,10 @@ define void @test_vsuxseg5_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10779,10 +10619,10 @@ define void @test_vsuxseg5_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10798,10 +10638,10 @@ define void @test_vsuxseg5_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -10814,10 +10654,10 @@ define void @test_vsuxseg5_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -10833,11 +10673,11 @@ define void @test_vsuxseg6_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10850,11 +10690,11 @@ define void @test_vsuxseg6_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10870,11 +10710,11 @@ define void @test_vsuxseg6_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10887,11 +10727,11 @@ define void @test_vsuxseg6_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10907,11 +10747,11 @@ define void @test_vsuxseg6_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10924,11 +10764,11 @@ define void @test_vsuxseg6_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -10944,11 +10784,11 @@ define void @test_vsuxseg6_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -10961,11 +10801,11 @@ define void @test_vsuxseg6_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -10981,12 +10821,12 @@ define void @test_vsuxseg7_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -10999,12 +10839,12 @@ define void @test_vsuxseg7_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11020,12 +10860,12 @@ define void @test_vsuxseg7_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11038,12 +10878,12 @@ define void @test_vsuxseg7_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11059,12 +10899,12 @@ define void @test_vsuxseg7_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11077,12 +10917,12 @@ define void @test_vsuxseg7_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11098,12 +10938,12 @@ define void @test_vsuxseg7_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11116,12 +10956,12 @@ define void @test_vsuxseg7_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11137,13 +10977,13 @@ define void @test_vsuxseg8_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11156,13 +10996,13 @@ define void @test_vsuxseg8_mask_nxv2i16_nxv2i32(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11178,13 +11018,13 @@ define void @test_vsuxseg8_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11197,13 +11037,13 @@ define void @test_vsuxseg8_mask_nxv2i16_nxv2i8(<vscale x 2 x i16> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11219,13 +11059,13 @@ define void @test_vsuxseg8_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -11238,13 +11078,13 @@ define void @test_vsuxseg8_mask_nxv2i16_nxv2i16(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -11260,13 +11100,13 @@ define void @test_vsuxseg8_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11279,13 +11119,13 @@ define void @test_vsuxseg8_mask_nxv2i16_nxv2i64(<vscale x 2 x i16> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2i16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11300,7 +11140,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i32(<vscale x 2 x i64>,<vscal
 define void @test_vsuxseg2_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11314,7 +11153,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11331,7 +11169,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i8(<vscale x 2 x i64>,<vscale
 define void @test_vsuxseg2_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11345,7 +11182,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11362,7 +11198,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i16(<vscale x 2 x i64>,<vscal
 define void @test_vsuxseg2_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11376,7 +11211,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11393,7 +11227,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2i64.nxv2i64(<vscale x 2 x i64>,<vscal
 define void @test_vsuxseg2_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11407,7 +11240,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -11425,8 +11257,8 @@ define void @test_vsuxseg3_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11439,8 +11271,8 @@ define void @test_vsuxseg3_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11456,8 +11288,8 @@ define void @test_vsuxseg3_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11470,8 +11302,8 @@ define void @test_vsuxseg3_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11487,8 +11319,8 @@ define void @test_vsuxseg3_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11501,8 +11333,8 @@ define void @test_vsuxseg3_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11518,8 +11350,8 @@ define void @test_vsuxseg3_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11532,8 +11364,8 @@ define void @test_vsuxseg3_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11549,9 +11381,9 @@ define void @test_vsuxseg4_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11564,9 +11396,9 @@ define void @test_vsuxseg4_mask_nxv2i64_nxv2i32(<vscale x 2 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11582,9 +11414,9 @@ define void @test_vsuxseg4_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %base, <v
 ; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11597,9 +11429,9 @@ define void @test_vsuxseg4_mask_nxv2i64_nxv2i8(<vscale x 2 x i64> %val, ptr %bas
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11615,9 +11447,9 @@ define void @test_vsuxseg4_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11630,9 +11462,9 @@ define void @test_vsuxseg4_mask_nxv2i64_nxv2i16(<vscale x 2 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11648,9 +11480,9 @@ define void @test_vsuxseg4_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -11663,9 +11495,9 @@ define void @test_vsuxseg4_mask_nxv2i64_nxv2i64(<vscale x 2 x i64> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2i64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -11680,7 +11512,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i16(<vscale x 16 x half>,<v
 define void @test_vsuxseg2_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -11694,7 +11525,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16f16_nxv16i16(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -11711,7 +11541,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i8(<vscale x 16 x half>,<vs
 define void @test_vsuxseg2_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -11725,7 +11554,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16f16_nxv16i8(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i8> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
@@ -11742,7 +11570,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16f16.nxv16i32(<vscale x 16 x half>,<v
 define void @test_vsuxseg2_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16
@@ -11755,7 +11582,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16f16_nxv16i32(<vscale x 16 x half> %val, ptr %base, <vscale x 16 x i32> %index, <vscale x 16 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16f16_nxv16i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v16, v0.t
@@ -11771,7 +11597,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i32(<vscale x 4 x double>,<vs
 define void @test_vsuxseg2_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11785,7 +11610,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f64_nxv4i32(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11802,7 +11626,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i8(<vscale x 4 x double>,<vsc
 define void @test_vsuxseg2_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11816,7 +11639,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f64_nxv4i8(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11833,7 +11655,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i64(<vscale x 4 x double>,<vs
 define void @test_vsuxseg2_nxv4f64_nxv4i64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11847,7 +11668,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f64_nxv4i64(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11864,7 +11684,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f64.nxv4i16(<vscale x 4 x double>,<vs
 define void @test_vsuxseg2_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11878,7 +11697,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f64_nxv4i16(<vscale x 4 x double> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f64_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, ma
@@ -11895,7 +11713,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i64(<vscale x 1 x double>,<vs
 define void @test_vsuxseg2_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -11909,7 +11726,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -11926,7 +11742,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i32(<vscale x 1 x double>,<vs
 define void @test_vsuxseg2_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -11940,7 +11755,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -11957,7 +11771,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i16(<vscale x 1 x double>,<vs
 define void @test_vsuxseg2_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -11971,7 +11784,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -11988,7 +11800,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f64.nxv1i8(<vscale x 1 x double>,<vsc
 define void @test_vsuxseg2_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -12002,7 +11813,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
@@ -12020,8 +11830,8 @@ define void @test_vsuxseg3_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12034,8 +11844,8 @@ define void @test_vsuxseg3_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12051,8 +11861,8 @@ define void @test_vsuxseg3_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12065,8 +11875,8 @@ define void @test_vsuxseg3_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12082,8 +11892,8 @@ define void @test_vsuxseg3_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12096,8 +11906,8 @@ define void @test_vsuxseg3_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12113,8 +11923,8 @@ define void @test_vsuxseg3_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12127,8 +11937,8 @@ define void @test_vsuxseg3_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12144,9 +11954,9 @@ define void @test_vsuxseg4_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12159,9 +11969,9 @@ define void @test_vsuxseg4_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12177,9 +11987,9 @@ define void @test_vsuxseg4_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12192,9 +12002,9 @@ define void @test_vsuxseg4_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12210,9 +12020,9 @@ define void @test_vsuxseg4_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12225,9 +12035,9 @@ define void @test_vsuxseg4_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12243,9 +12053,9 @@ define void @test_vsuxseg4_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12258,9 +12068,9 @@ define void @test_vsuxseg4_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12276,10 +12086,10 @@ define void @test_vsuxseg5_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12292,10 +12102,10 @@ define void @test_vsuxseg5_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12311,10 +12121,10 @@ define void @test_vsuxseg5_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12327,10 +12137,10 @@ define void @test_vsuxseg5_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12346,10 +12156,10 @@ define void @test_vsuxseg5_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12362,10 +12172,10 @@ define void @test_vsuxseg5_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12381,10 +12191,10 @@ define void @test_vsuxseg5_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12397,10 +12207,10 @@ define void @test_vsuxseg5_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12416,11 +12226,11 @@ define void @test_vsuxseg6_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12433,11 +12243,11 @@ define void @test_vsuxseg6_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12453,11 +12263,11 @@ define void @test_vsuxseg6_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12470,11 +12280,11 @@ define void @test_vsuxseg6_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12490,11 +12300,11 @@ define void @test_vsuxseg6_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12507,11 +12317,11 @@ define void @test_vsuxseg6_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12527,11 +12337,11 @@ define void @test_vsuxseg6_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12544,11 +12354,11 @@ define void @test_vsuxseg6_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12564,12 +12374,12 @@ define void @test_vsuxseg7_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12582,12 +12392,12 @@ define void @test_vsuxseg7_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12603,12 +12413,12 @@ define void @test_vsuxseg7_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12621,12 +12431,12 @@ define void @test_vsuxseg7_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12642,12 +12452,12 @@ define void @test_vsuxseg7_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12660,12 +12470,12 @@ define void @test_vsuxseg7_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12681,12 +12491,12 @@ define void @test_vsuxseg7_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12699,12 +12509,12 @@ define void @test_vsuxseg7_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12720,13 +12530,13 @@ define void @test_vsuxseg8_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12739,13 +12549,13 @@ define void @test_vsuxseg8_mask_nxv1f64_nxv1i64(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12761,13 +12571,13 @@ define void @test_vsuxseg8_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12780,13 +12590,13 @@ define void @test_vsuxseg8_mask_nxv1f64_nxv1i32(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12802,13 +12612,13 @@ define void @test_vsuxseg8_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12821,13 +12631,13 @@ define void @test_vsuxseg8_mask_nxv1f64_nxv1i16(<vscale x 1 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12843,13 +12653,13 @@ define void @test_vsuxseg8_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -12862,13 +12672,13 @@ define void @test_vsuxseg8_mask_nxv1f64_nxv1i8(<vscale x 1 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f64_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -12883,7 +12693,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i32(<vscale x 2 x float>,<vsc
 define void @test_vsuxseg2_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -12897,7 +12706,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -12914,7 +12722,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i8(<vscale x 2 x float>,<vsca
 define void @test_vsuxseg2_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -12928,7 +12735,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -12945,7 +12751,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i16(<vscale x 2 x float>,<vsc
 define void @test_vsuxseg2_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -12959,7 +12764,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
@@ -12976,7 +12780,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f32.nxv2i64(<vscale x 2 x float>,<vsc
 define void @test_vsuxseg2_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
@@ -12989,7 +12792,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
@@ -13006,8 +12808,8 @@ define void @test_vsuxseg3_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13020,8 +12822,8 @@ define void @test_vsuxseg3_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13037,8 +12839,8 @@ define void @test_vsuxseg3_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13051,8 +12853,8 @@ define void @test_vsuxseg3_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13068,8 +12870,8 @@ define void @test_vsuxseg3_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13082,8 +12884,8 @@ define void @test_vsuxseg3_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13098,11 +12900,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float>,<vsc
 define void @test_vsuxseg3_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -13112,11 +12914,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2f32.nxv2i64(<vscale x 2 x float> %val,<vscale x 2 x float> %val,<vscale x 2 x float> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -13130,9 +12932,9 @@ define void @test_vsuxseg4_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13145,9 +12947,9 @@ define void @test_vsuxseg4_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13163,9 +12965,9 @@ define void @test_vsuxseg4_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13178,9 +12980,9 @@ define void @test_vsuxseg4_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13196,9 +12998,9 @@ define void @test_vsuxseg4_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13211,9 +13013,9 @@ define void @test_vsuxseg4_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13229,9 +13031,9 @@ define void @test_vsuxseg4_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13244,9 +13046,9 @@ define void @test_vsuxseg4_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13262,10 +13064,10 @@ define void @test_vsuxseg5_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13278,10 +13080,10 @@ define void @test_vsuxseg5_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13297,10 +13099,10 @@ define void @test_vsuxseg5_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13313,10 +13115,10 @@ define void @test_vsuxseg5_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13332,10 +13134,10 @@ define void @test_vsuxseg5_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13348,10 +13150,10 @@ define void @test_vsuxseg5_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13367,10 +13169,10 @@ define void @test_vsuxseg5_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13383,10 +13185,10 @@ define void @test_vsuxseg5_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13402,11 +13204,11 @@ define void @test_vsuxseg6_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13419,11 +13221,11 @@ define void @test_vsuxseg6_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13439,11 +13241,11 @@ define void @test_vsuxseg6_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13456,11 +13258,11 @@ define void @test_vsuxseg6_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13476,11 +13278,11 @@ define void @test_vsuxseg6_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13493,11 +13295,11 @@ define void @test_vsuxseg6_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13513,11 +13315,11 @@ define void @test_vsuxseg6_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13530,11 +13332,11 @@ define void @test_vsuxseg6_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13550,12 +13352,12 @@ define void @test_vsuxseg7_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13568,12 +13370,12 @@ define void @test_vsuxseg7_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13589,12 +13391,12 @@ define void @test_vsuxseg7_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13607,12 +13409,12 @@ define void @test_vsuxseg7_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13628,12 +13430,12 @@ define void @test_vsuxseg7_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13646,12 +13448,12 @@ define void @test_vsuxseg7_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13667,12 +13469,12 @@ define void @test_vsuxseg7_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13685,12 +13487,12 @@ define void @test_vsuxseg7_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13706,13 +13508,13 @@ define void @test_vsuxseg8_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13725,13 +13527,13 @@ define void @test_vsuxseg8_mask_nxv2f32_nxv2i32(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13747,13 +13549,13 @@ define void @test_vsuxseg8_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13766,13 +13568,13 @@ define void @test_vsuxseg8_mask_nxv2f32_nxv2i8(<vscale x 2 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13788,13 +13590,13 @@ define void @test_vsuxseg8_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -13807,13 +13609,13 @@ define void @test_vsuxseg8_mask_nxv2f32_nxv2i16(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -13829,13 +13631,13 @@ define void @test_vsuxseg8_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -13848,13 +13650,13 @@ define void @test_vsuxseg8_mask_nxv2f32_nxv2i64(<vscale x 2 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f32_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -13869,7 +13671,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i64(<vscale x 1 x half>,<vsca
 define void @test_vsuxseg2_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13883,7 +13684,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13900,7 +13700,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i32(<vscale x 1 x half>,<vsca
 define void @test_vsuxseg2_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13914,7 +13713,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13931,7 +13729,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i16(<vscale x 1 x half>,<vsca
 define void @test_vsuxseg2_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13945,7 +13742,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13962,7 +13758,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f16.nxv1i8(<vscale x 1 x half>,<vscal
 define void @test_vsuxseg2_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13976,7 +13771,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
@@ -13994,8 +13788,8 @@ define void @test_vsuxseg3_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14008,8 +13802,8 @@ define void @test_vsuxseg3_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14025,8 +13819,8 @@ define void @test_vsuxseg3_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14039,8 +13833,8 @@ define void @test_vsuxseg3_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14056,8 +13850,8 @@ define void @test_vsuxseg3_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14070,8 +13864,8 @@ define void @test_vsuxseg3_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14087,8 +13881,8 @@ define void @test_vsuxseg3_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14101,8 +13895,8 @@ define void @test_vsuxseg3_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14118,9 +13912,9 @@ define void @test_vsuxseg4_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14133,9 +13927,9 @@ define void @test_vsuxseg4_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14151,9 +13945,9 @@ define void @test_vsuxseg4_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14166,9 +13960,9 @@ define void @test_vsuxseg4_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14184,9 +13978,9 @@ define void @test_vsuxseg4_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14199,9 +13993,9 @@ define void @test_vsuxseg4_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14217,9 +14011,9 @@ define void @test_vsuxseg4_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14232,9 +14026,9 @@ define void @test_vsuxseg4_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14250,10 +14044,10 @@ define void @test_vsuxseg5_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14266,10 +14060,10 @@ define void @test_vsuxseg5_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14285,10 +14079,10 @@ define void @test_vsuxseg5_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14301,10 +14095,10 @@ define void @test_vsuxseg5_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14320,10 +14114,10 @@ define void @test_vsuxseg5_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14336,10 +14130,10 @@ define void @test_vsuxseg5_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14355,10 +14149,10 @@ define void @test_vsuxseg5_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14371,10 +14165,10 @@ define void @test_vsuxseg5_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14390,11 +14184,11 @@ define void @test_vsuxseg6_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14407,11 +14201,11 @@ define void @test_vsuxseg6_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14427,11 +14221,11 @@ define void @test_vsuxseg6_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14444,11 +14238,11 @@ define void @test_vsuxseg6_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14464,11 +14258,11 @@ define void @test_vsuxseg6_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14481,11 +14275,11 @@ define void @test_vsuxseg6_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14501,11 +14295,11 @@ define void @test_vsuxseg6_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14518,11 +14312,11 @@ define void @test_vsuxseg6_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14538,12 +14332,12 @@ define void @test_vsuxseg7_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14556,12 +14350,12 @@ define void @test_vsuxseg7_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14577,12 +14371,12 @@ define void @test_vsuxseg7_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14595,12 +14389,12 @@ define void @test_vsuxseg7_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14616,12 +14410,12 @@ define void @test_vsuxseg7_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14634,12 +14428,12 @@ define void @test_vsuxseg7_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14655,12 +14449,12 @@ define void @test_vsuxseg7_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14673,12 +14467,12 @@ define void @test_vsuxseg7_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14694,13 +14488,13 @@ define void @test_vsuxseg8_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14713,13 +14507,13 @@ define void @test_vsuxseg8_mask_nxv1f16_nxv1i64(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14735,13 +14529,13 @@ define void @test_vsuxseg8_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14754,13 +14548,13 @@ define void @test_vsuxseg8_mask_nxv1f16_nxv1i32(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14776,13 +14570,13 @@ define void @test_vsuxseg8_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14795,13 +14589,13 @@ define void @test_vsuxseg8_mask_nxv1f16_nxv1i16(<vscale x 1 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14817,13 +14611,13 @@ define void @test_vsuxseg8_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14836,13 +14630,13 @@ define void @test_vsuxseg8_mask_nxv1f16_nxv1i8(<vscale x 1 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f16_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -14857,7 +14651,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i64(<vscale x 1 x float>,<vsc
 define void @test_vsuxseg2_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14871,7 +14664,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i64> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14888,7 +14680,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i32(<vscale x 1 x float>,<vsc
 define void @test_vsuxseg2_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14902,7 +14693,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14919,7 +14709,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i16(<vscale x 1 x float>,<vsc
 define void @test_vsuxseg2_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14933,7 +14722,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i16> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14950,7 +14738,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv1f32.nxv1i8(<vscale x 1 x float>,<vsca
 define void @test_vsuxseg2_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14964,7 +14751,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base, <vscale x 1 x i8> %index, <vscale x 1 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
@@ -14982,8 +14768,8 @@ define void @test_vsuxseg3_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -14996,8 +14782,8 @@ define void @test_vsuxseg3_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15013,8 +14799,8 @@ define void @test_vsuxseg3_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15027,8 +14813,8 @@ define void @test_vsuxseg3_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15044,8 +14830,8 @@ define void @test_vsuxseg3_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15058,8 +14844,8 @@ define void @test_vsuxseg3_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15075,8 +14861,8 @@ define void @test_vsuxseg3_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15089,8 +14875,8 @@ define void @test_vsuxseg3_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15106,9 +14892,9 @@ define void @test_vsuxseg4_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15121,9 +14907,9 @@ define void @test_vsuxseg4_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15139,9 +14925,9 @@ define void @test_vsuxseg4_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15154,9 +14940,9 @@ define void @test_vsuxseg4_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15172,9 +14958,9 @@ define void @test_vsuxseg4_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15187,9 +14973,9 @@ define void @test_vsuxseg4_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15205,9 +14991,9 @@ define void @test_vsuxseg4_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15220,9 +15006,9 @@ define void @test_vsuxseg4_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15238,10 +15024,10 @@ define void @test_vsuxseg5_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15254,10 +15040,10 @@ define void @test_vsuxseg5_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15273,10 +15059,10 @@ define void @test_vsuxseg5_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15289,10 +15075,10 @@ define void @test_vsuxseg5_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15308,10 +15094,10 @@ define void @test_vsuxseg5_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15324,10 +15110,10 @@ define void @test_vsuxseg5_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15343,10 +15129,10 @@ define void @test_vsuxseg5_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15359,10 +15145,10 @@ define void @test_vsuxseg5_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15378,11 +15164,11 @@ define void @test_vsuxseg6_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15395,11 +15181,11 @@ define void @test_vsuxseg6_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15415,11 +15201,11 @@ define void @test_vsuxseg6_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15432,11 +15218,11 @@ define void @test_vsuxseg6_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15452,11 +15238,11 @@ define void @test_vsuxseg6_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15469,11 +15255,11 @@ define void @test_vsuxseg6_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15489,11 +15275,11 @@ define void @test_vsuxseg6_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15506,11 +15292,11 @@ define void @test_vsuxseg6_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15526,12 +15312,12 @@ define void @test_vsuxseg7_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15544,12 +15330,12 @@ define void @test_vsuxseg7_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15565,12 +15351,12 @@ define void @test_vsuxseg7_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15583,12 +15369,12 @@ define void @test_vsuxseg7_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15604,12 +15390,12 @@ define void @test_vsuxseg7_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15622,12 +15408,12 @@ define void @test_vsuxseg7_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15643,12 +15429,12 @@ define void @test_vsuxseg7_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15661,12 +15447,12 @@ define void @test_vsuxseg7_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15682,13 +15468,13 @@ define void @test_vsuxseg8_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15701,13 +15487,13 @@ define void @test_vsuxseg8_mask_nxv1f32_nxv1i64(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15723,13 +15509,13 @@ define void @test_vsuxseg8_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15742,13 +15528,13 @@ define void @test_vsuxseg8_mask_nxv1f32_nxv1i32(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15764,13 +15550,13 @@ define void @test_vsuxseg8_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15783,13 +15569,13 @@ define void @test_vsuxseg8_mask_nxv1f32_nxv1i16(<vscale x 1 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15805,13 +15591,13 @@ define void @test_vsuxseg8_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -15824,13 +15610,13 @@ define void @test_vsuxseg8_mask_nxv1f32_nxv1i8(<vscale x 1 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv1f32_nxv1i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -15845,7 +15631,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i16(<vscale x 8 x half>,<vsca
 define void @test_vsuxseg2_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -15859,7 +15644,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -15876,7 +15660,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i8(<vscale x 8 x half>,<vscal
 define void @test_vsuxseg2_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -15890,7 +15673,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -15907,7 +15689,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i64(<vscale x 8 x half>,<vsca
 define void @test_vsuxseg2_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
@@ -15920,7 +15701,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
@@ -15936,7 +15716,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vsca
 define void @test_vsuxseg2_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12
@@ -15949,7 +15728,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v12, v0.t
@@ -15966,8 +15744,8 @@ define void @test_vsuxseg3_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -15980,8 +15758,8 @@ define void @test_vsuxseg3_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -15997,8 +15775,8 @@ define void @test_vsuxseg3_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16011,8 +15789,8 @@ define void @test_vsuxseg3_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16027,7 +15805,6 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i64(<vscale x 8 x half>,<vsca
 define void @test_vsuxseg3_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -16041,7 +15818,6 @@ entry:
 define void @test_vsuxseg3_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
@@ -16058,11 +15834,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half>,<vsca
 define void @test_vsuxseg3_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v16, (a0), v12
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl)
@@ -16072,11 +15848,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv8f16.nxv8i32(<vscale x 8 x half> %val,<vscale x 8 x half> %val,<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl)
@@ -16090,9 +15866,9 @@ define void @test_vsuxseg4_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16105,9 +15881,9 @@ define void @test_vsuxseg4_mask_nxv8f16_nxv8i16(<vscale x 8 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16123,9 +15899,9 @@ define void @test_vsuxseg4_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16138,9 +15914,9 @@ define void @test_vsuxseg4_mask_nxv8f16_nxv8i8(<vscale x 8 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16155,7 +15931,6 @@ declare void @llvm.riscv.vsuxseg4.mask.nxv8f16.nxv8i64(<vscale x 8 x half>,<vsca
 define void @test_vsuxseg4_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -16170,7 +15945,6 @@ entry:
 define void @test_vsuxseg4_mask_nxv8f16_nxv8i64(<vscale x 8 x half> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2_v12m2_v14m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vmv2r.v v14, v8
@@ -16189,9 +15963,9 @@ define void @test_vsuxseg4_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -16204,9 +15978,9 @@ define void @test_vsuxseg4_mask_nxv8f16_nxv8i32(<vscale x 8 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv8f16_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -16221,7 +15995,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i16(<vscale x 8 x float>,<vsc
 define void @test_vsuxseg2_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -16235,7 +16008,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8f32_nxv8i16(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i16> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv2r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -16252,7 +16024,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i8(<vscale x 8 x float>,<vsca
 define void @test_vsuxseg2_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -16266,7 +16037,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8f32_nxv8i8(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i8> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv1r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -16283,7 +16053,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i64(<vscale x 8 x float>,<vsc
 define void @test_vsuxseg2_nxv8f32_nxv8i64(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16
@@ -16296,7 +16065,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8f32_nxv8i64(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i64> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v16, v0.t
@@ -16312,7 +16080,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv8f32.nxv8i32(<vscale x 8 x float>,<vsc
 define void @test_vsuxseg2_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -16326,7 +16093,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv8f32_nxv8i32(<vscale x 8 x float> %val, ptr %base, <vscale x 8 x i32> %index, <vscale x 8 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv8f32_nxv8i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, ma
@@ -16343,7 +16109,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i32(<vscale x 2 x double>,<vs
 define void @test_vsuxseg2_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16357,7 +16122,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16374,7 +16138,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i8(<vscale x 2 x double>,<vsc
 define void @test_vsuxseg2_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16388,7 +16151,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16405,7 +16167,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i16(<vscale x 2 x double>,<vs
 define void @test_vsuxseg2_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16419,7 +16180,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16436,7 +16196,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f64.nxv2i64(<vscale x 2 x double>,<vs
 define void @test_vsuxseg2_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16450,7 +16209,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
@@ -16468,8 +16226,8 @@ define void @test_vsuxseg3_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16482,8 +16240,8 @@ define void @test_vsuxseg3_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16499,8 +16257,8 @@ define void @test_vsuxseg3_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16513,8 +16271,8 @@ define void @test_vsuxseg3_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16530,8 +16288,8 @@ define void @test_vsuxseg3_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16544,8 +16302,8 @@ define void @test_vsuxseg3_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16561,8 +16319,8 @@ define void @test_vsuxseg3_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg3_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16575,8 +16333,8 @@ define void @test_vsuxseg3_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16592,9 +16350,9 @@ define void @test_vsuxseg4_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16607,9 +16365,9 @@ define void @test_vsuxseg4_mask_nxv2f64_nxv2i32(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16625,9 +16383,9 @@ define void @test_vsuxseg4_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16640,9 +16398,9 @@ define void @test_vsuxseg4_mask_nxv2f64_nxv2i8(<vscale x 2 x double> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16658,9 +16416,9 @@ define void @test_vsuxseg4_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16673,9 +16431,9 @@ define void @test_vsuxseg4_mask_nxv2f64_nxv2i16(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16691,9 +16449,9 @@ define void @test_vsuxseg4_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr %base
 ; CHECK-LABEL: test_vsuxseg4_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16706,9 +16464,9 @@ define void @test_vsuxseg4_mask_nxv2f64_nxv2i64(<vscale x 2 x double> %val, ptr
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f64_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -16723,7 +16481,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vsca
 define void @test_vsuxseg2_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10
@@ -16736,7 +16493,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei32.v v8, (a0), v10, v0.t
@@ -16752,7 +16508,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i8(<vscale x 4 x half>,<vscal
 define void @test_vsuxseg2_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -16766,7 +16521,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -16783,7 +16537,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vsca
 define void @test_vsuxseg2_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
@@ -16796,7 +16549,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
@@ -16812,7 +16564,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f16.nxv4i16(<vscale x 4 x half>,<vsca
 define void @test_vsuxseg2_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -16826,7 +16577,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -16843,11 +16593,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half>,<vsca
 define void @test_vsuxseg3_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl)
@@ -16857,11 +16607,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsuxseg3ei32.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i32(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -16875,8 +16625,8 @@ define void @test_vsuxseg3_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -16889,8 +16639,8 @@ define void @test_vsuxseg3_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -16905,7 +16655,6 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vsca
 define void @test_vsuxseg3_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -16919,7 +16668,6 @@ entry:
 define void @test_vsuxseg3_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
@@ -16937,8 +16685,8 @@ define void @test_vsuxseg3_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -16951,8 +16699,8 @@ define void @test_vsuxseg3_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -16968,9 +16716,9 @@ define void @test_vsuxseg4_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -16983,9 +16731,9 @@ define void @test_vsuxseg4_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -17001,9 +16749,9 @@ define void @test_vsuxseg4_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17016,9 +16764,9 @@ define void @test_vsuxseg4_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17033,7 +16781,6 @@ declare void @llvm.riscv.vsuxseg4.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vsca
 define void @test_vsuxseg4_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -17048,7 +16795,6 @@ entry:
 define void @test_vsuxseg4_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9_v10_v11
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vmv1r.v v11, v8
@@ -17067,9 +16813,9 @@ define void @test_vsuxseg4_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17082,9 +16828,9 @@ define void @test_vsuxseg4_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17100,10 +16846,10 @@ define void @test_vsuxseg5_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -17116,10 +16862,10 @@ define void @test_vsuxseg5_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -17135,10 +16881,10 @@ define void @test_vsuxseg5_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17151,10 +16897,10 @@ define void @test_vsuxseg5_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17169,13 +16915,13 @@ declare void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half>,<vsca
 define void @test_vsuxseg5_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsuxseg5ei64.v v16, (a0), v12
+; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
@@ -17185,13 +16931,13 @@ entry:
 define void @test_vsuxseg5_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv1r.v v10, v8
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
-; CHECK-NEXT:    vsuxseg5ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsuxseg5ei64.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg5.mask.nxv4f16.nxv4i64(<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val,<vscale x 4 x half> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -17205,10 +16951,10 @@ define void @test_vsuxseg5_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17221,10 +16967,10 @@ define void @test_vsuxseg5_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17240,11 +16986,11 @@ define void @test_vsuxseg6_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -17257,11 +17003,11 @@ define void @test_vsuxseg6_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -17277,11 +17023,11 @@ define void @test_vsuxseg6_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17294,11 +17040,11 @@ define void @test_vsuxseg6_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17314,11 +17060,11 @@ define void @test_vsuxseg6_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -17331,11 +17077,11 @@ define void @test_vsuxseg6_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -17351,11 +17097,11 @@ define void @test_vsuxseg6_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17368,11 +17114,11 @@ define void @test_vsuxseg6_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17388,12 +17134,12 @@ define void @test_vsuxseg7_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -17406,12 +17152,12 @@ define void @test_vsuxseg7_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -17427,12 +17173,12 @@ define void @test_vsuxseg7_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17445,12 +17191,12 @@ define void @test_vsuxseg7_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17466,12 +17212,12 @@ define void @test_vsuxseg7_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -17484,12 +17230,12 @@ define void @test_vsuxseg7_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -17505,12 +17251,12 @@ define void @test_vsuxseg7_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17523,12 +17269,12 @@ define void @test_vsuxseg7_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17544,13 +17290,13 @@ define void @test_vsuxseg8_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -17563,13 +17309,13 @@ define void @test_vsuxseg8_mask_nxv4f16_nxv4i32(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -17585,13 +17331,13 @@ define void @test_vsuxseg8_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17604,13 +17350,13 @@ define void @test_vsuxseg8_mask_nxv4f16_nxv4i8(<vscale x 4 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17626,13 +17372,13 @@ define void @test_vsuxseg8_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -17645,13 +17391,13 @@ define void @test_vsuxseg8_mask_nxv4f16_nxv4i64(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v16, v8
-; CHECK-NEXT:    vmv1r.v v17, v16
-; CHECK-NEXT:    vmv1r.v v18, v16
-; CHECK-NEXT:    vmv1r.v v19, v16
-; CHECK-NEXT:    vmv1r.v v20, v16
-; CHECK-NEXT:    vmv1r.v v21, v16
-; CHECK-NEXT:    vmv1r.v v22, v16
-; CHECK-NEXT:    vmv1r.v v23, v16
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
+; CHECK-NEXT:    vmv1r.v v20, v8
+; CHECK-NEXT:    vmv1r.v v21, v8
+; CHECK-NEXT:    vmv1r.v v22, v8
+; CHECK-NEXT:    vmv1r.v v23, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -17667,13 +17413,13 @@ define void @test_vsuxseg8_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17686,13 +17432,13 @@ define void @test_vsuxseg8_mask_nxv4f16_nxv4i16(<vscale x 4 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv4f16_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17707,7 +17453,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i32(<vscale x 2 x half>,<vsca
 define void @test_vsuxseg2_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -17721,7 +17466,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i32> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -17738,7 +17482,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i8(<vscale x 2 x half>,<vscal
 define void @test_vsuxseg2_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -17752,7 +17495,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i8> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -17769,7 +17511,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i16(<vscale x 2 x half>,<vsca
 define void @test_vsuxseg2_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -17783,7 +17524,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i16> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 killed $v8_v9 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v10, v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
@@ -17800,7 +17540,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv2f16.nxv2i64(<vscale x 2 x half>,<vsca
 define void @test_vsuxseg2_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10
@@ -17813,7 +17552,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8_v9
 ; CHECK-NEXT:    vmv1r.v v9, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v10, v0.t
@@ -17830,8 +17568,8 @@ define void @test_vsuxseg3_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17844,8 +17582,8 @@ define void @test_vsuxseg3_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17861,8 +17599,8 @@ define void @test_vsuxseg3_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17875,8 +17613,8 @@ define void @test_vsuxseg3_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17892,8 +17630,8 @@ define void @test_vsuxseg3_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17906,8 +17644,8 @@ define void @test_vsuxseg3_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17922,11 +17660,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half>,<vsca
 define void @test_vsuxseg3_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10
+; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, i64 %vl)
@@ -17936,11 +17674,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
+; CHECK-NEXT:    vmv1r.v v9, v8
+; CHECK-NEXT:    vmv2r.v v12, v10
+; CHECK-NEXT:    vmv1r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei64.v v12, (a0), v10, v0.t
+; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv2f16.nxv2i64(<vscale x 2 x half> %val,<vscale x 2 x half> %val,<vscale x 2 x half> %val, ptr %base, <vscale x 2 x i64> %index, <vscale x 2 x i1> %mask, i64 %vl)
@@ -17954,9 +17692,9 @@ define void @test_vsuxseg4_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -17969,9 +17707,9 @@ define void @test_vsuxseg4_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -17987,9 +17725,9 @@ define void @test_vsuxseg4_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18002,9 +17740,9 @@ define void @test_vsuxseg4_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18020,9 +17758,9 @@ define void @test_vsuxseg4_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18035,9 +17773,9 @@ define void @test_vsuxseg4_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18053,9 +17791,9 @@ define void @test_vsuxseg4_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18068,9 +17806,9 @@ define void @test_vsuxseg4_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18086,10 +17824,10 @@ define void @test_vsuxseg5_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18102,10 +17840,10 @@ define void @test_vsuxseg5_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18121,10 +17859,10 @@ define void @test_vsuxseg5_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18137,10 +17875,10 @@ define void @test_vsuxseg5_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18156,10 +17894,10 @@ define void @test_vsuxseg5_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18172,10 +17910,10 @@ define void @test_vsuxseg5_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18191,10 +17929,10 @@ define void @test_vsuxseg5_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg5_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18207,10 +17945,10 @@ define void @test_vsuxseg5_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg5_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg5ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18226,11 +17964,11 @@ define void @test_vsuxseg6_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18243,11 +17981,11 @@ define void @test_vsuxseg6_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18263,11 +18001,11 @@ define void @test_vsuxseg6_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18280,11 +18018,11 @@ define void @test_vsuxseg6_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18300,11 +18038,11 @@ define void @test_vsuxseg6_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18317,11 +18055,11 @@ define void @test_vsuxseg6_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18337,11 +18075,11 @@ define void @test_vsuxseg6_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg6_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18354,11 +18092,11 @@ define void @test_vsuxseg6_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg6_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg6ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18374,12 +18112,12 @@ define void @test_vsuxseg7_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18392,12 +18130,12 @@ define void @test_vsuxseg7_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18413,12 +18151,12 @@ define void @test_vsuxseg7_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18431,12 +18169,12 @@ define void @test_vsuxseg7_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18452,12 +18190,12 @@ define void @test_vsuxseg7_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18470,12 +18208,12 @@ define void @test_vsuxseg7_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18491,12 +18229,12 @@ define void @test_vsuxseg7_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg7_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18509,12 +18247,12 @@ define void @test_vsuxseg7_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg7_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg7ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18530,13 +18268,13 @@ define void @test_vsuxseg8_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18549,13 +18287,13 @@ define void @test_vsuxseg8_mask_nxv2f16_nxv2i32(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei32.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18571,13 +18309,13 @@ define void @test_vsuxseg8_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %base, <
 ; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18590,13 +18328,13 @@ define void @test_vsuxseg8_mask_nxv2f16_nxv2i8(<vscale x 2 x half> %val, ptr %ba
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei8.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18612,13 +18350,13 @@ define void @test_vsuxseg8_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9
 ; CHECK-NEXT:    ret
@@ -18631,13 +18369,13 @@ define void @test_vsuxseg8_mask_nxv2f16_nxv2i16(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v10, v8
-; CHECK-NEXT:    vmv1r.v v11, v10
-; CHECK-NEXT:    vmv1r.v v12, v10
-; CHECK-NEXT:    vmv1r.v v13, v10
-; CHECK-NEXT:    vmv1r.v v14, v10
-; CHECK-NEXT:    vmv1r.v v15, v10
-; CHECK-NEXT:    vmv1r.v v16, v10
-; CHECK-NEXT:    vmv1r.v v17, v10
+; CHECK-NEXT:    vmv1r.v v11, v8
+; CHECK-NEXT:    vmv1r.v v12, v8
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei16.v v10, (a0), v9, v0.t
 ; CHECK-NEXT:    ret
@@ -18653,13 +18391,13 @@ define void @test_vsuxseg8_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg8_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18672,13 +18410,13 @@ define void @test_vsuxseg8_mask_nxv2f16_nxv2i64(<vscale x 2 x half> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg8_mask_nxv2f16_nxv2i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv1r.v v12, v8
-; CHECK-NEXT:    vmv1r.v v13, v12
-; CHECK-NEXT:    vmv1r.v v14, v12
-; CHECK-NEXT:    vmv1r.v v15, v12
-; CHECK-NEXT:    vmv1r.v v16, v12
-; CHECK-NEXT:    vmv1r.v v17, v12
-; CHECK-NEXT:    vmv1r.v v18, v12
-; CHECK-NEXT:    vmv1r.v v19, v12
+; CHECK-NEXT:    vmv1r.v v13, v8
+; CHECK-NEXT:    vmv1r.v v14, v8
+; CHECK-NEXT:    vmv1r.v v15, v8
+; CHECK-NEXT:    vmv1r.v v16, v8
+; CHECK-NEXT:    vmv1r.v v17, v8
+; CHECK-NEXT:    vmv1r.v v18, v8
+; CHECK-NEXT:    vmv1r.v v19, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, ma
 ; CHECK-NEXT:    vsuxseg8ei64.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18693,7 +18431,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i32(<vscale x 4 x float>,<vsc
 define void @test_vsuxseg2_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -18707,7 +18444,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i32> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -18724,7 +18460,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i8(<vscale x 4 x float>,<vsca
 define void @test_vsuxseg2_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -18738,7 +18473,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i8> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -18755,7 +18489,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i64(<vscale x 4 x float>,<vsc
 define void @test_vsuxseg2_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12
@@ -18768,7 +18501,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg2ei64.v v8, (a0), v12, v0.t
@@ -18784,7 +18516,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv4f32.nxv4i16(<vscale x 4 x float>,<vsc
 define void @test_vsuxseg2_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -18798,7 +18529,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i16> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 killed $v8m2_v10m2 def $v8m2_v10m2
 ; CHECK-NEXT:    vmv1r.v v12, v10
 ; CHECK-NEXT:    vmv2r.v v10, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
@@ -18816,8 +18546,8 @@ define void @test_vsuxseg3_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18830,8 +18560,8 @@ define void @test_vsuxseg3_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18847,8 +18577,8 @@ define void @test_vsuxseg3_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18861,8 +18591,8 @@ define void @test_vsuxseg3_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18877,11 +18607,11 @@ declare void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float>,<vsc
 define void @test_vsuxseg3_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei64.v v16, (a0), v12
+; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, i64 %vl)
@@ -18891,11 +18621,11 @@ entry:
 define void @test_vsuxseg3_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl) {
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
+; CHECK-NEXT:    vmv2r.v v10, v8
+; CHECK-NEXT:    vmv4r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v12, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
-; CHECK-NEXT:    vsuxseg3ei64.v v16, (a0), v12, v0.t
+; CHECK-NEXT:    vsuxseg3ei64.v v8, (a0), v16, v0.t
 ; CHECK-NEXT:    ret
 entry:
   tail call void @llvm.riscv.vsuxseg3.mask.nxv4f32.nxv4i64(<vscale x 4 x float> %val,<vscale x 4 x float> %val,<vscale x 4 x float> %val, ptr %base, <vscale x 4 x i64> %index, <vscale x 4 x i1> %mask, i64 %vl)
@@ -18909,8 +18639,8 @@ define void @test_vsuxseg3_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg3_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18923,8 +18653,8 @@ define void @test_vsuxseg3_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg3_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg3ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18940,9 +18670,9 @@ define void @test_vsuxseg4_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18955,9 +18685,9 @@ define void @test_vsuxseg4_mask_nxv4f32_nxv4i32(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i32:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei32.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -18973,9 +18703,9 @@ define void @test_vsuxseg4_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -18988,9 +18718,9 @@ define void @test_vsuxseg4_mask_nxv4f32_nxv4i8(<vscale x 4 x float> %val, ptr %b
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i8:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei8.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret
@@ -19006,9 +18736,9 @@ define void @test_vsuxseg4_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v16, (a0), v12
 ; CHECK-NEXT:    ret
@@ -19021,9 +18751,9 @@ define void @test_vsuxseg4_mask_nxv4f32_nxv4i64(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i64:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v16, v8
-; CHECK-NEXT:    vmv2r.v v18, v16
-; CHECK-NEXT:    vmv2r.v v20, v16
-; CHECK-NEXT:    vmv2r.v v22, v16
+; CHECK-NEXT:    vmv2r.v v18, v8
+; CHECK-NEXT:    vmv2r.v v20, v8
+; CHECK-NEXT:    vmv2r.v v22, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei64.v v16, (a0), v12, v0.t
 ; CHECK-NEXT:    ret
@@ -19039,9 +18769,9 @@ define void @test_vsuxseg4_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %base,
 ; CHECK-LABEL: test_vsuxseg4_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10
 ; CHECK-NEXT:    ret
@@ -19054,9 +18784,9 @@ define void @test_vsuxseg4_mask_nxv4f32_nxv4i16(<vscale x 4 x float> %val, ptr %
 ; CHECK-LABEL: test_vsuxseg4_mask_nxv4f32_nxv4i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vmv2r.v v12, v8
-; CHECK-NEXT:    vmv2r.v v14, v12
-; CHECK-NEXT:    vmv2r.v v16, v12
-; CHECK-NEXT:    vmv2r.v v18, v12
+; CHECK-NEXT:    vmv2r.v v14, v8
+; CHECK-NEXT:    vmv2r.v v16, v8
+; CHECK-NEXT:    vmv2r.v v18, v8
 ; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, ma
 ; CHECK-NEXT:    vsuxseg4ei16.v v12, (a0), v10, v0.t
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
index 24407b2ab4b0d..3ce9147622749 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -285,29 +285,25 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
 ; CHECK-NEXT:    addi sp, sp, -16
 ; CHECK-NEXT:    .cfi_def_cfa_offset 16
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    li a3, 24
-; CHECK-NEXT:    mul a1, a1, a3
+; CHECK-NEXT:    slli a1, a1, 4
 ; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x18, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 24 * vlenb
+; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
 ; CHECK-NEXT:    vmv1r.v v1, v0
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    add a1, sp, a1
-; CHECK-NEXT:    addi a1, a1, 16
+; CHECK-NEXT:    addi a1, sp, 16
 ; CHECK-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 4
+; CHECK-NEXT:    slli a1, a1, 3
 ; CHECK-NEXT:    add a1, sp, a1
 ; CHECK-NEXT:    addi a1, a1, 16
 ; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    srli a3, a1, 2
 ; CHECK-NEXT:    vsetvli a4, zero, e8, mf2, ta, ma
-; CHECK-NEXT:    vslidedown.vx v25, v0, a3
+; CHECK-NEXT:    vslidedown.vx v16, v0, a3
 ; CHECK-NEXT:    vsetvli a3, zero, e8, mf4, ta, ma
 ; CHECK-NEXT:    slli a3, a1, 3
 ; CHECK-NEXT:    add a3, a0, a3
-; CHECK-NEXT:    vl8re64.v v8, (a3)
+; CHECK-NEXT:    vl8re64.v v24, (a3)
 ; CHECK-NEXT:    slli a3, a1, 1
 ; CHECK-NEXT:    sub a4, a2, a3
 ; CHECK-NEXT:    sltu a5, a2, a4
@@ -318,22 +314,18 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
 ; CHECK-NEXT:    addi a6, a6, -1
 ; CHECK-NEXT:    and a6, a6, a5
 ; CHECK-NEXT:    srli a5, a1, 3
-; CHECK-NEXT:    vl8re64.v v16, (a0)
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT:    vslidedown.vx v0, v25, a5
+; CHECK-NEXT:    vl8re64.v v8, (a0)
+; CHECK-NEXT:    vslidedown.vx v0, v16, a5
 ; CHECK-NEXT:    vsetvli zero, a6, e32, m4, ta, ma
-; CHECK-NEXT:    vnsrl.wi v20, v8, 0, v0.t
+; CHECK-NEXT:    vnsrl.wi v20, v24, 0, v0.t
 ; CHECK-NEXT:    bltu a4, a1, .LBB17_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a4, a1
 ; CHECK-NEXT:  .LBB17_2:
 ; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, ma
-; CHECK-NEXT:    vslidedown.vx v26, v1, a5
+; CHECK-NEXT:    vslidedown.vx v2, v1, a5
 ; CHECK-NEXT:    vsetvli zero, a4, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v25
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vmv1r.v v0, v16
 ; CHECK-NEXT:    vnsrl.wi v16, v8, 0, v0.t
 ; CHECK-NEXT:    bltu a2, a3, .LBB17_4
 ; CHECK-NEXT:  # %bb.3:
@@ -344,13 +336,10 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a0, a3, a0
 ; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, ma
-; CHECK-NEXT:    vmv1r.v v0, v26
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add a0, sp, a0
-; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vnsrl.wi v12, v24, 0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v2
+; CHECK-NEXT:    addi a0, sp, 16
+; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vnsrl.wi v28, v8, 0, v0.t
 ; CHECK-NEXT:    bltu a2, a1, .LBB17_6
 ; CHECK-NEXT:  # %bb.5:
 ; CHECK-NEXT:    mv a2, a1
@@ -358,14 +347,14 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vs
 ; CHECK-NEXT:    vsetvli zero, a2, e32, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v1
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 4
+; CHECK-NEXT:    slli a0, a0, 3
 ; CHECK-NEXT:    add a0, sp, a0
 ; CHECK-NEXT:    addi a0, a0, 16
-; CHECK-NEXT:    vl8r.v v24, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vnsrl.wi v8, v24, 0, v0.t
+; CHECK-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; CHECK-NEXT:    vnsrl.wi v24, v8, 0, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    li a1, 24
-; CHECK-NEXT:    mul a0, a0, a1
+; CHECK-NEXT:    slli a0, a0, 4
 ; CHECK-NEXT:    add sp, sp, a0
 ; CHECK-NEXT:    addi sp, sp, 16
 ; CHECK-NEXT:    ret

diff --git a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
index 7cdd0af87f714..5107e1942c9f3 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vuitofp-vp.ll
@@ -306,15 +306,7 @@ declare <vscale x 32 x half> @llvm.vp.uitofp.nxv32f16.nxv32i32(<vscale x 32 x i3
 define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
 ; CHECK-LABEL: vuitofp_nxv32f16_nxv32i32:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    addi sp, sp, -16
-; CHECK-NEXT:    .cfi_def_cfa_offset 16
-; CHECK-NEXT:    csrr a1, vlenb
-; CHECK-NEXT:    slli a1, a1, 3
-; CHECK-NEXT:    sub sp, sp, a1
-; CHECK-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x08, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 8 * vlenb
 ; CHECK-NEXT:    vmv1r.v v24, v0
-; CHECK-NEXT:    addi a1, sp, 16
-; CHECK-NEXT:    vs8r.v v8, (a1) # Unknown-size Folded Spill
 ; CHECK-NEXT:    csrr a1, vlenb
 ; CHECK-NEXT:    srli a2, a1, 2
 ; CHECK-NEXT:    vsetvli a3, zero, e8, mf2, ta, ma
@@ -325,20 +317,15 @@ define <vscale x 32 x half> @vuitofp_nxv32f16_nxv32i32(<vscale x 32 x i32> %va,
 ; CHECK-NEXT:    addi a3, a3, -1
 ; CHECK-NEXT:    and a2, a3, a2
 ; CHECK-NEXT:    vsetvli zero, a2, e16, m4, ta, ma
-; CHECK-NEXT:    vfncvt.f.xu.w v12, v16, v0.t
+; CHECK-NEXT:    vfncvt.f.xu.w v28, v16, v0.t
 ; CHECK-NEXT:    bltu a0, a1, .LBB25_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    mv a0, a1
 ; CHECK-NEXT:  .LBB25_2:
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, ma
 ; CHECK-NEXT:    vmv1r.v v0, v24
-; CHECK-NEXT:    addi a0, sp, 16
-; CHECK-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
-; CHECK-NEXT:    vfncvt.f.xu.w v8, v16, v0.t
-; CHECK-NEXT:    csrr a0, vlenb
-; CHECK-NEXT:    slli a0, a0, 3
-; CHECK-NEXT:    add sp, sp, a0
-; CHECK-NEXT:    addi sp, sp, 16
+; CHECK-NEXT:    vfncvt.f.xu.w v24, v8, v0.t
+; CHECK-NEXT:    vmv8r.v v8, v24
 ; CHECK-NEXT:    ret
   %v = call <vscale x 32 x half> @llvm.vp.uitofp.nxv32f16.nxv32i32(<vscale x 32 x i32> %va, <vscale x 32 x i1> %m, i32 %evl)
   ret <vscale x 32 x half> %v

diff --git a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
index ec14e4eac8d70..0f1e33e47f03e 100644
--- a/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/zvlsseg-zero-vl.ll
@@ -15,7 +15,6 @@ define <vscale x 16 x i16> @test_vlseg2_mask_nxv16i16(ptr %base, <vscale x 16 x
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0)
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlseg2e16.v v4, (a0), v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlseg2.nxv16i16(<vscale x 16 x i16> undef,<vscale x 16 x i16> undef, ptr %base, i64 0)
@@ -35,7 +34,6 @@ define <vscale x 16 x i16> @test_vlsseg2_mask_nxv16i16(ptr %base, i64 %offset, <
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1
 ; CHECK-NEXT:    vmv4r.v v8, v4
 ; CHECK-NEXT:    vlsseg2e16.v v4, (a0), a1, v0.t
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>} @llvm.riscv.vlsseg2.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 %offset, i64 0)
@@ -94,7 +92,6 @@ define <vscale x 16 x i16> @test_vlseg2ff_nxv16i16(ptr %base, ptr %outvl) {
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0)
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a1)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i16> undef, ptr %base, i64 0)
@@ -112,7 +109,6 @@ define <vscale x 16 x i16> @test_vlseg2ff_mask_nxv16i16(<vscale x 16 x i16> %val
 ; CHECK-NEXT:    vlseg2e16ff.v v4, (a0), v0.t
 ; CHECK-NEXT:    csrr a0, vl
 ; CHECK-NEXT:    sd a0, 0(a1)
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
 ; CHECK-NEXT:    ret
 entry:
   %0 = tail call {<vscale x 16 x i16>,<vscale x 16 x i16>, i64} @llvm.riscv.vlseg2ff.mask.nxv16i16(<vscale x 16 x i16> %val,<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask, i64 0, i64 1)
@@ -128,7 +124,6 @@ declare void @llvm.riscv.vsseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16 x
 define void @test_vsseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base) {
 ; CHECK-LABEL: test_vsseg2_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetivli zero, 0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0)
@@ -141,7 +136,6 @@ entry:
 define void @test_vsseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vsseg2_mask_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetivli zero, 0, e16, m4, ta, ma
 ; CHECK-NEXT:    vsseg2e16.v v8, (a0), v0.t
@@ -157,7 +151,6 @@ declare void @llvm.riscv.vssseg2.mask.nxv16i16(<vscale x 16 x i16>,<vscale x 16
 define void @test_vssseg2_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i64 %offset) {
 ; CHECK-LABEL: test_vssseg2_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetivli zero, 0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1
@@ -170,7 +163,6 @@ entry:
 define void @test_vssseg2_mask_nxv16i16(<vscale x 16 x i16> %val, ptr %base, i64 %offset, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vssseg2_mask_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetivli zero, 0, e16, m4, ta, ma
 ; CHECK-NEXT:    vssseg2e16.v v8, (a0), a1, v0.t
@@ -186,7 +178,6 @@ declare void @llvm.riscv.vsoxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vs
 define void @test_vsoxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index) {
 ; CHECK-LABEL: test_vsoxseg2_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetivli zero, 0, e16, m4, ta, ma
@@ -200,7 +191,6 @@ entry:
 define void @test_vsoxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vsoxseg2_mask_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetivli zero, 0, e16, m4, ta, ma
@@ -217,7 +207,6 @@ declare void @llvm.riscv.vsuxseg2.mask.nxv16i16.nxv16i16(<vscale x 16 x i16>,<vs
 define void @test_vsuxseg2_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index) {
 ; CHECK-LABEL: test_vsuxseg2_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetivli zero, 0, e16, m4, ta, ma
@@ -231,7 +220,6 @@ entry:
 define void @test_vsuxseg2_mask_nxv16i16_nxv16i16(<vscale x 16 x i16> %val, ptr %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask) {
 ; CHECK-LABEL: test_vsuxseg2_mask_nxv16i16_nxv16i16:
 ; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
 ; CHECK-NEXT:    vmv4r.v v16, v12
 ; CHECK-NEXT:    vmv4r.v v12, v8
 ; CHECK-NEXT:    vsetivli zero, 0, e16, m4, ta, ma

diff --git a/llvm/test/CodeGen/RISCV/shuffle-reverse.ll b/llvm/test/CodeGen/RISCV/shuffle-reverse.ll
index 049e45bba8b6f..dcf1a05f034e2 100644
--- a/llvm/test/CodeGen/RISCV/shuffle-reverse.ll
+++ b/llvm/test/CodeGen/RISCV/shuffle-reverse.ll
@@ -117,7 +117,6 @@ define <32 x i8> @v16i8_2(<16 x i8> %a, <16 x i8> %b) {
 ; RV32-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; RV32-NEXT:    vle8.v v12, (a0)
 ; RV32-NEXT:    vmv1r.v v14, v9
-; RV32-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; RV32-NEXT:    vrgather.vv v10, v8, v12
 ; RV32-NEXT:    vid.v v8
 ; RV32-NEXT:    vrsub.vi v8, v8, 15
@@ -138,7 +137,6 @@ define <32 x i8> @v16i8_2(<16 x i8> %a, <16 x i8> %b) {
 ; RV64-NEXT:    vsetvli zero, a1, e8, m2, ta, ma
 ; RV64-NEXT:    vle8.v v12, (a0)
 ; RV64-NEXT:    vmv1r.v v14, v9
-; RV64-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; RV64-NEXT:    vrgather.vv v10, v8, v12
 ; RV64-NEXT:    vid.v v8
 ; RV64-NEXT:    vrsub.vi v8, v8, 15
@@ -232,7 +230,6 @@ define <16 x i16> @v8i16_2(<8 x i16> %a, <8 x i16> %b) {
 ; CHECK-LABEL: v8i16_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vid.v v14
 ; CHECK-NEXT:    vrsub.vi v16, v14, 15
@@ -267,19 +264,18 @@ define <32 x i16> @v16i16_2(<16 x i16> %a, <16 x i16> %b) {
 ; RV32-NEXT:    addi a0, a0, %lo(.LCPI15_0)
 ; RV32-NEXT:    li a1, 32
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; RV32-NEXT:    vle16.v v16, (a0)
-; RV32-NEXT:    vmv2r.v v20, v10
-; RV32-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m4
-; RV32-NEXT:    vrgather.vv v12, v8, v16
-; RV32-NEXT:    vid.v v8
-; RV32-NEXT:    vrsub.vi v8, v8, 15
+; RV32-NEXT:    vle16.v v20, (a0)
+; RV32-NEXT:    vmv2r.v v16, v10
+; RV32-NEXT:    vmv2r.v v12, v8
+; RV32-NEXT:    vrgather.vv v8, v12, v20
+; RV32-NEXT:    vid.v v12
+; RV32-NEXT:    vrsub.vi v12, v12, 15
 ; RV32-NEXT:    lui a0, 16
 ; RV32-NEXT:    addi a0, a0, -1
 ; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; RV32-NEXT:    vrgather.vv v12, v20, v8, v0.t
-; RV32-NEXT:    vmv.v.v v8, v12
+; RV32-NEXT:    vrgather.vv v8, v16, v12, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: v16i16_2:
@@ -288,19 +284,18 @@ define <32 x i16> @v16i16_2(<16 x i16> %a, <16 x i16> %b) {
 ; RV64-NEXT:    addi a0, a0, %lo(.LCPI15_0)
 ; RV64-NEXT:    li a1, 32
 ; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, ma
-; RV64-NEXT:    vle16.v v16, (a0)
-; RV64-NEXT:    vmv2r.v v20, v10
-; RV64-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m4
-; RV64-NEXT:    vrgather.vv v12, v8, v16
-; RV64-NEXT:    vid.v v8
-; RV64-NEXT:    vrsub.vi v8, v8, 15
+; RV64-NEXT:    vle16.v v20, (a0)
+; RV64-NEXT:    vmv2r.v v16, v10
+; RV64-NEXT:    vmv2r.v v12, v8
+; RV64-NEXT:    vrgather.vv v8, v12, v20
+; RV64-NEXT:    vid.v v12
+; RV64-NEXT:    vrsub.vi v12, v12, 15
 ; RV64-NEXT:    lui a0, 16
 ; RV64-NEXT:    addiw a0, a0, -1
 ; RV64-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
 ; RV64-NEXT:    vmv.s.x v0, a0
 ; RV64-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
-; RV64-NEXT:    vrgather.vv v12, v20, v8, v0.t
-; RV64-NEXT:    vmv.v.v v8, v12
+; RV64-NEXT:    vrgather.vv v8, v16, v12, v0.t
 ; RV64-NEXT:    ret
   %v32i16 = shufflevector <16 x i16> %a, <16 x i16> %b,  <32 x i32> <i32 31, i32 30, i32 29, i32 28, i32 27, i32 26, i32 25, i32 24, i32 23, i32 22, i32 21, i32 20, i32 19, i32 18, i32 17, i32 16, i32 15, i32 14, i32 13, i32 12, i32 11, i32 10, i32 9, i32 8, i32 7, i32 6, i32 5, i32 4, i32 3, i32 2, i32 1, i32 0>
   ret <32 x i16> %v32i16
@@ -354,7 +349,6 @@ define <8 x i32> @v4i32_2(<4 x i32> %a, <4 x i32> %b) {
 ; CHECK-LABEL: v4i32_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vid.v v14
 ; CHECK-NEXT:    vrsub.vi v16, v14, 7
@@ -386,7 +380,6 @@ define <16 x i32> @v8i32_2(<8 x i32> %a, <8 x i32> %b) {
 ; CHECK-LABEL: v8i32_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m4
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vid.v v20
 ; CHECK-NEXT:    vrsub.vi v24, v20, 15
@@ -475,16 +468,15 @@ define <4 x i64> @v2i64_2(<2 x i64> %a, < 2 x i64> %b) {
 ; CHECK-LABEL: v2i64_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v12, v8, 1
+; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v12, v8, 1
+; CHECK-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v9, 1
+; CHECK-NEXT:    vslidedown.vi v8, v9, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v10, v9, 1
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
-; CHECK-NEXT:    vslideup.vi v10, v12, 2
-; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    vslideup.vi v8, v10, 2
 ; CHECK-NEXT:    ret
   %v4i64 = shufflevector <2 x i64> %a, <2 x i64> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   ret <4 x i64> %v4i64
@@ -517,16 +509,15 @@ define <8 x i64> @v4i64_2(<4 x i64> %a, <4 x i64> %b) {
 ; RV32-LABEL: v4i64_2:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vmv2r.v v16, v10
-; RV32-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m4
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT:    vid.v v20
-; RV32-NEXT:    vrsub.vi v21, v20, 7
+; RV32-NEXT:    vid.v v18
+; RV32-NEXT:    vrsub.vi v19, v18, 7
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32-NEXT:    vrgatherei16.vv v12, v8, v21
+; RV32-NEXT:    vrgatherei16.vv v12, v8, v19
 ; RV32-NEXT:    li a0, 15
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV32-NEXT:    vrsub.vi v8, v20, 3
+; RV32-NEXT:    vrsub.vi v8, v18, 3
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vrgatherei16.vv v12, v16, v8, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -535,7 +526,6 @@ define <8 x i64> @v4i64_2(<4 x i64> %a, <4 x i64> %b) {
 ; RV64-LABEL: v4i64_2:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vmv2r.v v16, v10
-; RV64-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m4
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vid.v v20
 ; RV64-NEXT:    vrsub.vi v24, v20, 7
@@ -628,7 +618,6 @@ define <16 x half> @v8f16_2(<8 x half> %a, <8 x half> %b) {
 ; CHECK-LABEL: v8f16_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; CHECK-NEXT:    vsetivli zero, 16, e16, m2, ta, mu
 ; CHECK-NEXT:    vid.v v14
 ; CHECK-NEXT:    vrsub.vi v16, v14, 15
@@ -659,7 +648,6 @@ define <16 x half> @v16f16(<16 x half> %a) {
 define <32 x half> @v16f16_2(<16 x half> %a) {
 ; CHECK-LABEL: v16f16_2:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m4
 ; CHECK-NEXT:    lui a0, %hi(.LCPI35_0)
 ; CHECK-NEXT:    addi a0, a0, %lo(.LCPI35_0)
 ; CHECK-NEXT:    li a1, 32
@@ -723,7 +711,6 @@ define <8 x float> @v4f32_2(<4 x float> %a, <4 x float> %b) {
 ; CHECK-LABEL: v4f32_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv1r.v v12, v9
-; CHECK-NEXT:    # kill: def $v8 killed $v8 def $v8m2
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, mu
 ; CHECK-NEXT:    vid.v v14
 ; CHECK-NEXT:    vrsub.vi v16, v14, 7
@@ -755,7 +742,6 @@ define <16 x float> @v8f32_2(<8 x float> %a, <8 x float> %b) {
 ; CHECK-LABEL: v8f32_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vmv2r.v v16, v10
-; CHECK-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m4
 ; CHECK-NEXT:    vsetivli zero, 16, e32, m4, ta, mu
 ; CHECK-NEXT:    vid.v v20
 ; CHECK-NEXT:    vrsub.vi v24, v20, 15
@@ -787,16 +773,15 @@ define <4 x double> @v2f64_2(<2 x double> %a, < 2 x double> %b) {
 ; CHECK-LABEL: v2f64_2:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v12, v8, 1
+; CHECK-NEXT:    vslidedown.vi v10, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v12, v8, 1
+; CHECK-NEXT:    vslideup.vi v10, v8, 1
 ; CHECK-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT:    vslidedown.vi v10, v9, 1
+; CHECK-NEXT:    vslidedown.vi v8, v9, 1
 ; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
-; CHECK-NEXT:    vslideup.vi v10, v9, 1
+; CHECK-NEXT:    vslideup.vi v8, v9, 1
 ; CHECK-NEXT:    vsetivli zero, 4, e64, m2, tu, ma
-; CHECK-NEXT:    vslideup.vi v10, v12, 2
-; CHECK-NEXT:    vmv2r.v v8, v10
+; CHECK-NEXT:    vslideup.vi v8, v10, 2
 ; CHECK-NEXT:    ret
   %v4f64 = shufflevector <2 x double> %a, <2 x double> %b, <4 x i32> <i32 3, i32 2, i32 1, i32 0>
   ret <4 x double> %v4f64
@@ -829,16 +814,15 @@ define <8 x double> @v4f64_2(<4 x double> %a, <4 x double> %b) {
 ; RV32-LABEL: v4f64_2:
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vmv2r.v v16, v10
-; RV32-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m4
 ; RV32-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
-; RV32-NEXT:    vid.v v20
-; RV32-NEXT:    vrsub.vi v21, v20, 7
+; RV32-NEXT:    vid.v v18
+; RV32-NEXT:    vrsub.vi v19, v18, 7
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, ma
-; RV32-NEXT:    vrgatherei16.vv v12, v8, v21
+; RV32-NEXT:    vrgatherei16.vv v12, v8, v19
 ; RV32-NEXT:    li a0, 15
 ; RV32-NEXT:    vmv.s.x v0, a0
 ; RV32-NEXT:    vsetvli zero, zero, e16, m1, ta, ma
-; RV32-NEXT:    vrsub.vi v8, v20, 3
+; RV32-NEXT:    vrsub.vi v8, v18, 3
 ; RV32-NEXT:    vsetvli zero, zero, e64, m4, ta, mu
 ; RV32-NEXT:    vrgatherei16.vv v12, v16, v8, v0.t
 ; RV32-NEXT:    vmv.v.v v8, v12
@@ -847,7 +831,6 @@ define <8 x double> @v4f64_2(<4 x double> %a, <4 x double> %b) {
 ; RV64-LABEL: v4f64_2:
 ; RV64:       # %bb.0:
 ; RV64-NEXT:    vmv2r.v v16, v10
-; RV64-NEXT:    # kill: def $v8m2 killed $v8m2 def $v8m4
 ; RV64-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV64-NEXT:    vid.v v20
 ; RV64-NEXT:    vrsub.vi v24, v20, 7

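For anyone diffing these changes locally: the before/after codegen in hunks like the vtrunc-vp.ll one above can be reproduced by toggling subregister liveness explicitly. A minimal sketch, assuming llc's hidden -riscv-enable-subreg-liveness option and a plain +v attribute (both spellings are assumptions here, not taken from this diff):

    # Old behavior: force subregister liveness off.
    llc -mtriple=riscv64 -mattr=+v -riscv-enable-subreg-liveness=false \
        llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll -o before.s
    # New behavior: rely on the now-enabled default.
    llc -mtriple=riscv64 -mattr=+v \
        llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll -o after.s
    diff before.s after.s

The deltas above are consistent with that switch: once liveness is tracked per subregister, whole segment tuples such as v8m2_v10m2 no longer have to be kept live as a unit, so the '# kill:' comments vanish and several of the vtrunc/vuitofp cases shed their stack spills entirely.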