[llvm] [GISEL][RISCV] RegBank Scalable Vector Load/Store (PR #99932)

Jiahan Xie via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 30 10:45:17 PDT 2024


https://github.com/jiahanxie353 updated https://github.com/llvm/llvm-project/pull/99932

From 4bcdb09156284a1045d79abcc6d920f6f043415b Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Mon, 22 Jul 2024 16:44:15 -0400
Subject: [PATCH 1/3] register bank select for scalable vector load/store

---
 .../RISCV/GISel/RISCVRegisterBankInfo.cpp     |   33 +-
 .../GlobalISel/regbankselect/rvv/load.mir     | 1569 ++++++++++++++++
 .../GlobalISel/regbankselect/rvv/store.mir    | 1570 +++++++++++++++++
 3 files changed, 3166 insertions(+), 6 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 43bbc8589e7e2..b9e52b7d8682d 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -310,10 +310,20 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   switch (Opc) {
   case TargetOpcode::G_LOAD: {
     LLT Ty = MRI.getType(MI.getOperand(0).getReg());
-    OpdsMapping[0] = GPRValueMapping;
-    OpdsMapping[1] = GPRValueMapping;
+    TypeSize Size = Ty.getSizeInBits();
+    if (Ty.isVector()) {
+      OpdsMapping[0] = getVRBValueMapping(Size.getKnownMinValue());
+      OpdsMapping[1] = getVRBValueMapping(Size.getKnownMinValue());
+    } else if (isPreISelGenericFloatingPointOpcode(Opc)) {
+      OpdsMapping[0] = getFPValueMapping(Size.getFixedValue());
+      OpdsMapping[1] = getFPValueMapping(Size.getFixedValue());
+    } else {
+      OpdsMapping[0] = GPRValueMapping;
+      OpdsMapping[1] = GPRValueMapping;
+    }
     // Use FPR64 for s64 loads on rv32.
-    if (GPRSize == 32 && Ty.getSizeInBits() == 64) {
+    if (GPRSize == 32 && Ty.getSizeInBits().getKnownMinValue() == 64 &&
+        !Ty.isVector()) {
       assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
       OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
       break;
@@ -333,10 +343,21 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
   }
   case TargetOpcode::G_STORE: {
     LLT Ty = MRI.getType(MI.getOperand(0).getReg());
-    OpdsMapping[0] = GPRValueMapping;
-    OpdsMapping[1] = GPRValueMapping;
+    TypeSize Size = Ty.getSizeInBits();
+    if (Ty.isVector()) {
+      OpdsMapping[0] = getVRBValueMapping(Size.getKnownMinValue());
+      OpdsMapping[1] = getVRBValueMapping(Size.getKnownMinValue());
+    } else if (isPreISelGenericFloatingPointOpcode(Opc)) {
+      OpdsMapping[0] = getFPValueMapping(Size.getFixedValue());
+      OpdsMapping[1] = getFPValueMapping(Size.getFixedValue());
+    } else {
+      OpdsMapping[0] = GPRValueMapping;
+      OpdsMapping[1] = GPRValueMapping;
+    }
+
     // Use FPR64 for s64 stores on rv32.
-    if (GPRSize == 32 && Ty.getSizeInBits() == 64) {
+    if (GPRSize == 32 && Ty.getSizeInBits().getKnownMinValue() == 64 &&
+        !Ty.isVector()) {
       assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
       OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
       break;
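
For readers skimming the patch: the two hunks above apply the same rule to G_LOAD and G_STORE. Below is a condensed, standalone sketch of that rule; BankKind, TypeDesc and pickBank are illustrative stand-ins for this sketch only, not LLVM API (the actual code uses getVRBValueMapping, getFPValueMapping and GPRValueMapping as shown in the diff).

// Condensed model of the bank-selection rule added above for G_LOAD/G_STORE.
// BankKind, TypeDesc and pickBank are stand-ins for illustration, not LLVM API.
#include <cstdio>

enum class BankKind { GPRB, FPRB, VRB };

struct TypeDesc {
  bool IsVector;    // is the loaded/stored type a (scalable) vector?
  unsigned MinBits; // known-minimum size in bits (TypeSize::getKnownMinValue)
};

// Vectors map to the vector register bank keyed by their known-minimum size;
// scalars stay on GPR unless the opcode is a floating-point one or the
// rv32 "s64 goes to FPR64" special case (which requires the D extension) applies.
static BankKind pickBank(const TypeDesc &Ty, unsigned GPRSize, bool IsFPOpcode) {
  if (Ty.IsVector)
    return BankKind::VRB;
  if (IsFPOpcode)
    return BankKind::FPRB;
  if (GPRSize == 32 && Ty.MinBits == 64)
    return BankKind::FPRB;
  return BankKind::GPRB;
}

int main() {
  std::printf("%d\n", (int)pickBank({true, 64}, 64, false));  // <vscale x 2 x s32> on rv64 -> VRB (2)
  std::printf("%d\n", (int)pickBank({false, 64}, 32, false)); // s64 load on rv32 -> FPRB (1)
  std::printf("%d\n", (int)pickBank({false, 32}, 64, false)); // s32 load on rv64 -> GPRB (0)
  return 0;
}

The tests that follow exercise exactly the first branch: every scalable-vector load/store ends up on vrb, with the pointer operand copied from gprb into vrb as needed.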
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
new file mode 100644
index 0000000000000..ce2bbb6441647
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
@@ -0,0 +1,1569 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+--- |
+
+  define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) #0 {
+    %va = load <vscale x 1 x i8>, ptr %pa, align 1
+    ret <vscale x 1 x i8> %va
+  }
+
+  define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i8>, ptr %pa, align 2
+    ret <vscale x 2 x i8> %va
+  }
+
+  define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) #0 {
+    %va = load <vscale x 4 x i8>, ptr %pa, align 4
+    ret <vscale x 4 x i8> %va
+  }
+
+  define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) #0 {
+    %va = load <vscale x 8 x i8>, ptr %pa, align 8
+    ret <vscale x 8 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 16
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 32 x i8> @vload_nx32i8(ptr %pa) #0 {
+    %va = load <vscale x 32 x i8>, ptr %pa, align 32
+    ret <vscale x 32 x i8> %va
+  }
+
+  define <vscale x 64 x i8> @vload_nx64i8(ptr %pa) #0 {
+    %va = load <vscale x 64 x i8>, ptr %pa, align 64
+    ret <vscale x 64 x i8> %va
+  }
+
+  define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) #0 {
+    %va = load <vscale x 1 x i16>, ptr %pa, align 2
+    ret <vscale x 1 x i16> %va
+  }
+
+  define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i16>, ptr %pa, align 4
+    ret <vscale x 2 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 8
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) #0 {
+    %va = load <vscale x 8 x i16>, ptr %pa, align 16
+    ret <vscale x 8 x i16> %va
+  }
+
+  define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) #0 {
+    %va = load <vscale x 16 x i16>, ptr %pa, align 32
+    ret <vscale x 16 x i16> %va
+  }
+
+  define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) #0 {
+    %va = load <vscale x 32 x i16>, ptr %pa, align 64
+    ret <vscale x 32 x i16> %va
+  }
+
+  define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) #0 {
+    %va = load <vscale x 1 x i32>, ptr %pa, align 4
+    ret <vscale x 1 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 8
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) #0 {
+    %va = load <vscale x 4 x i32>, ptr %pa, align 16
+    ret <vscale x 4 x i32> %va
+  }
+
+  define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) #0 {
+    %va = load <vscale x 8 x i32>, ptr %pa, align 32
+    ret <vscale x 8 x i32> %va
+  }
+
+  define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) #0 {
+    %va = load <vscale x 16 x i32>, ptr %pa, align 64
+    ret <vscale x 16 x i32> %va
+  }
+
+  define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) #0 {
+    %va = load <vscale x 1 x i64>, ptr %pa, align 8
+    ret <vscale x 1 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 16
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) #0 {
+    %va = load <vscale x 4 x i64>, ptr %pa, align 32
+    ret <vscale x 4 x i64> %va
+  }
+
+  define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) #0 {
+    %va = load <vscale x 8 x i64>, ptr %pa, align 64
+    ret <vscale x 8 x i64> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8_align1(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 1
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8_align2(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 2
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8_align16(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 16
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8_align64(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 64
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align1(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 1
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align2(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 2
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align4(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 4
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align8(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 8
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align16(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 16
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align2(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 2
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align4(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 4
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 8
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 16
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align256(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 256
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64_align4(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 4
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64_align8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 8
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64_align16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 16
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64_align32(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 32
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 1 x ptr> @vload_nx1ptr(ptr %pa) #0 {
+    %va = load <vscale x 1 x ptr>, ptr %pa, align 4
+    ret <vscale x 1 x ptr> %va
+  }
+
+  define <vscale x 2 x ptr> @vload_nx2ptr(ptr %pa) #0 {
+    %va = load <vscale x 2 x ptr>, ptr %pa, align 8
+    ret <vscale x 2 x ptr> %va
+  }
+
+  define <vscale x 8 x ptr> @vload_nx8ptr(ptr %pa) #0 {
+    %va = load <vscale x 8 x ptr>, ptr %pa, align 32
+    ret <vscale x 8 x ptr> %va
+  }
+
+  attributes #0 = { "target-features"="+v" }
+
+...
+---
+name:            vload_nx1i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx1i8
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx1i8
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2i8
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx2i8
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx4i8
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx4i8
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx8i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx8i8
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx8i8
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx16i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx16i8
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vload_nx16i8
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx32i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx32i8
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+    ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: vload_nx32i8
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+    ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nx64i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx64i8
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+    ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: vload_nx64i8
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+    ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nx1i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx1i16
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx1i16
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2i16
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx2i16
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx4i16
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx4i16
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx8i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx8i16
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vload_nx8i16
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx16i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx16i16
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+    ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: vload_nx16i16
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+    ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nx32i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx32i16
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+    ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: vload_nx32i16
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+    ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nx1i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx1i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx1i32
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx2i32
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx4i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vload_nx4i32
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx8i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx8i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+    ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: vload_nx8i32
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+    ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nx16i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx16i32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+    ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: vload_nx16i32
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+    ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nx1i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx1i64
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx1i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2i64
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vload_nx2i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx4i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx4i64
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+    ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: vload_nx4i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+    ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nx8i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx8i64
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+    ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m8
+    ;
+    ; RV64I-LABEL: name: vload_nx8i64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+    ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nx16i8_align1
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx16i8_align1
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vload_nx16i8_align1
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx16i8_align2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx16i8_align2
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vload_nx16i8_align2
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx16i8_align16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx16i8_align16
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vload_nx16i8_align16
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx16i8_align64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx16i8_align64
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vload_nx16i8_align64
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx4i16_align1
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx4i16_align1
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+    ; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx4i16_align1
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+    ; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+    %1:_(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16_align2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx4i16_align2
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx4i16_align2
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16_align4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx4i16_align4
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx4i16_align4
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16_align8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx4i16_align8
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx4i16_align8
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16_align16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx4i16_align16
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx4i16_align16
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2i32_align2
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+    ; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx2i32_align2
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+    ; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+    %1:_(<vscale x 2 x s32>) = G_BITCAST %2(<vscale x 8 x s8>)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2i32_align4
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx2i32_align4
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2i32_align8
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx2i32_align8
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2i32_align16
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx2i32_align16
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align256
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2i32_align256
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx2i32_align256
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i64_align4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2i64_align4
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
+    ; RV32I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vload_nx2i64_align4
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
+    ; RV64I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %2:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+    %1:_(<vscale x 2 x s64>) = G_BITCAST %2(<vscale x 16 x s8>)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx2i64_align8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2i64_align8
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vload_nx2i64_align8
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx2i64_align16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2i64_align16
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vload_nx2i64_align16
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx2i64_align32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2i64_align32
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+    ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m2
+    ;
+    ; RV64I-LABEL: name: vload_nx2i64_align32
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+    ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx1ptr
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx1ptr
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx1ptr
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x p0>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2ptr
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2ptr
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx2ptr
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x p0>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx8ptr
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx8ptr
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+    ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+    ; RV32I-NEXT: PseudoRET implicit $v8m4
+    ;
+    ; RV64I-LABEL: name: vload_nx8ptr
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+    ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+    ; RV64I-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 8 x p0>)
+    PseudoRET implicit $v8m4
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
new file mode 100644
index 0000000000000..6c8c3f73fb580
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
@@ -0,0 +1,1570 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+
+--- |
+
+  define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) #0 {
+    store <vscale x 1 x i8> %b, ptr %pa, align 1
+    ret void
+  }
+
+  define void @vstore_nx2i8(ptr %pa, <vscale x 2 x i8> %b) #0 {
+    store <vscale x 2 x i8> %b, ptr %pa, align 2
+    ret void
+  }
+
+  define void @vstore_nx4i8(ptr %pa, <vscale x 4 x i8> %b) #0 {
+    store <vscale x 4 x i8> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx8i8(ptr %pa, <vscale x 8 x i8> %b) #0 {
+    store <vscale x 8 x i8> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx16i8(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx32i8(ptr %pa, <vscale x 32 x i8> %b) #0 {
+    store <vscale x 32 x i8> %b, ptr %pa, align 32
+    ret void
+  }
+
+  define void @vstore_nx64i8(ptr %pa, <vscale x 64 x i8> %b) #0 {
+    store <vscale x 64 x i8> %b, ptr %pa, align 64
+    ret void
+  }
+
+  define void @vstore_nx1i16(ptr %pa, <vscale x 1 x i16> %b) #0 {
+    store <vscale x 1 x i16> %b, ptr %pa, align 2
+    ret void
+  }
+
+  define void @vstore_nx2i16(ptr %pa, <vscale x 2 x i16> %b) #0 {
+    store <vscale x 2 x i16> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx4i16(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx8i16(ptr %pa, <vscale x 8 x i16> %b) #0 {
+    store <vscale x 8 x i16> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx16i16(ptr %pa, <vscale x 16 x i16> %b) #0 {
+    store <vscale x 16 x i16> %b, ptr %pa, align 32
+    ret void
+  }
+
+  define void @vstore_nx32i16(ptr %pa, <vscale x 32 x i16> %b) #0 {
+    store <vscale x 32 x i16> %b, ptr %pa, align 64
+    ret void
+  }
+
+  define void @vstore_nx1i32(ptr %pa, <vscale x 1 x i32> %b) #0 {
+    store <vscale x 1 x i32> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx2i32(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx4i32(ptr %pa, <vscale x 4 x i32> %b) #0 {
+    store <vscale x 4 x i32> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx8i32(ptr %pa, <vscale x 8 x i32> %b) #0 {
+    store <vscale x 8 x i32> %b, ptr %pa, align 32
+    ret void
+  }
+
+  define void @vstore_nx16i32(ptr %pa, <vscale x 16 x i32> %b) #0 {
+    store <vscale x 16 x i32> %b, ptr %pa, align 64
+    ret void
+  }
+
+  define void @vstore_nx1i64(ptr %pa, <vscale x 1 x i64> %b) #0 {
+    store <vscale x 1 x i64> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx2i64(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx4i64(ptr %pa, <vscale x 4 x i64> %b) #0 {
+    store <vscale x 4 x i64> %b, ptr %pa, align 32
+    ret void
+  }
+
+  define void @vstore_nx8i64(ptr %pa, <vscale x 8 x i64> %b) #0 {
+    store <vscale x 8 x i64> %b, ptr %pa, align 64
+    ret void
+  }
+
+  define void @vstore_nx16i8_align1(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 1
+    ret void
+  }
+
+  define void @vstore_nx16i8_align2(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 2
+    ret void
+  }
+
+  define void @vstore_nx16i8_align16(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx16i8_align64(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 64
+    ret void
+  }
+
+  define void @vstore_nx4i16_align1(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 1
+    ret void
+  }
+
+  define void @vstore_nx4i16_align2(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 2
+    ret void
+  }
+
+  define void @vstore_nx4i16_align4(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx4i16_align8(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx4i16_align16(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx2i32_align2(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 2
+    ret void
+  }
+
+  define void @vstore_nx2i32_align4(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx2i32_align8(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx2i32_align16(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx2i32_align256(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 256
+    ret void
+  }
+
+  define void @vstore_nx2i64_align4(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx2i64_align8(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx2i64_align16(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx2i64_align32(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 32
+    ret void
+  }
+
+  define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) #0 {
+    store <vscale x 1 x ptr> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) #0 {
+    store <vscale x 2 x ptr> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) #0 {
+    store <vscale x 8 x ptr> %b, ptr %pa, align 32
+    ret void
+  }
+
+  attributes #0 = { "target-features"="+v" }
+
+...
+---
+name:            vstore_nx1i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx1i8
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY2]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx1i8
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY2]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s8>) = COPY $v8
+    G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx2i8
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY2]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx2i8
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY2]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s8>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s8>), %0(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx4i8
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY2]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx4i8
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY2]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s8>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s8>), %0(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx8i8
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx8i8
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s8>) = COPY $v8
+    G_STORE %1(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; RV32I-LABEL: name: vstore_nx16i8
+    ; RV32I: liveins: $x10, $v8m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx16i8
+    ; RV64I: liveins: $x10, $v8m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx32i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+
+    ; RV32I-LABEL: name: vstore_nx32i8
+    ; RV32I: liveins: $x10, $v8m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY2]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx32i8
+    ; RV64I: liveins: $x10, $v8m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY2]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 32 x s8>) = COPY $v8m4
+    G_STORE %1(<vscale x 32 x s8>), %0(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx64i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m8
+
+    ; RV32I-LABEL: name: vstore_nx64i8
+    ; RV32I: liveins: $x10, $v8m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY2]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx64i8
+    ; RV64I: liveins: $x10, $v8m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY2]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 64 x s8>) = COPY $v8m8
+    G_STORE %1(<vscale x 64 x s8>), %0(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx1i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx1i16
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY2]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx1i16
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY2]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 1 x s16>), %0(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx2i16
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY2]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx2i16
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY2]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s16>), %0(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx4i16
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx4i16
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; RV32I-LABEL: name: vstore_nx8i16
+    ; RV32I: liveins: $x10, $v8m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY2]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx8i16
+    ; RV64I: liveins: $x10, $v8m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY2]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s16>) = COPY $v8m2
+    G_STORE %1(<vscale x 8 x s16>), %0(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+
+    ; RV32I-LABEL: name: vstore_nx16i16
+    ; RV32I: liveins: $x10, $v8m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY2]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx16i16
+    ; RV64I: liveins: $x10, $v8m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY2]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s16>) = COPY $v8m4
+    G_STORE %1(<vscale x 16 x s16>), %0(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx32i16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m8
+
+    ; RV32I-LABEL: name: vstore_nx32i16
+    ; RV32I: liveins: $x10, $v8m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY2]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx32i16
+    ; RV64I: liveins: $x10, $v8m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY2]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 32 x s16>) = COPY $v8m8
+    G_STORE %1(<vscale x 32 x s16>), %0(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx1i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx1i32
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY2]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx1i32
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY2]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 1 x s32>), %0(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx2i32
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx2i32
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; RV32I-LABEL: name: vstore_nx4i32
+    ; RV32I: liveins: $x10, $v8m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY2]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx4i32
+    ; RV64I: liveins: $x10, $v8m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY2]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s32>) = COPY $v8m2
+    G_STORE %1(<vscale x 4 x s32>), %0(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+
+    ; RV32I-LABEL: name: vstore_nx8i32
+    ; RV32I: liveins: $x10, $v8m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY2]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx8i32
+    ; RV64I: liveins: $x10, $v8m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY2]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s32>) = COPY $v8m4
+    G_STORE %1(<vscale x 8 x s32>), %0(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m8
+
+    ; RV32I-LABEL: name: vstore_nx16i32
+    ; RV32I: liveins: $x10, $v8m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY2]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx16i32
+    ; RV64I: liveins: $x10, $v8m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY2]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s32>) = COPY $v8m8
+    G_STORE %1(<vscale x 16 x s32>), %0(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx1i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx1i64
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY2]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx1i64
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY2]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s64>) = COPY $v8
+    G_STORE %1(<vscale x 1 x s64>), %0(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; RV32I-LABEL: name: vstore_nx2i64
+    ; RV32I: liveins: $x10, $v8m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx2i64
+    ; RV64I: liveins: $x10, $v8m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = COPY $v8m2
+    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+
+    ; RV32I-LABEL: name: vstore_nx4i64
+    ; RV32I: liveins: $x10, $v8m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY2]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx4i64
+    ; RV64I: liveins: $x10, $v8m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY2]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s64>) = COPY $v8m4
+    G_STORE %1(<vscale x 4 x s64>), %0(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8i64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m8
+
+    ; RV32I-LABEL: name: vstore_nx8i64
+    ; RV32I: liveins: $x10, $v8m8
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY2]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx8i64
+    ; RV64I: liveins: $x10, $v8m8
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY2]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s64>) = COPY $v8m8
+    G_STORE %1(<vscale x 8 x s64>), %0(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8_align1
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; RV32I-LABEL: name: vstore_nx16i8_align1
+    ; RV32I: liveins: $x10, $v8m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx16i8_align1
+    ; RV64I: liveins: $x10, $v8m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8_align2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; RV32I-LABEL: name: vstore_nx16i8_align2
+    ; RV32I: liveins: $x10, $v8m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx16i8_align2
+    ; RV64I: liveins: $x10, $v8m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8_align16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; RV32I-LABEL: name: vstore_nx16i8_align16
+    ; RV32I: liveins: $x10, $v8m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx16i8_align16
+    ; RV64I: liveins: $x10, $v8m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8_align64
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; RV32I-LABEL: name: vstore_nx16i8_align64
+    ; RV32I: liveins: $x10, $v8m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx16i8_align64
+    ; RV64I: liveins: $x10, $v8m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align1
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx4i16_align1
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx4i16_align1
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = COPY $v8
+    %2:_(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 4 x s16>)
+    G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx4i16_align2
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx4i16_align2
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx4i16_align4
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx4i16_align4
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx4i16_align8
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx4i16_align8
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx4i16_align16
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx4i16_align16
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align2
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx2i32_align2
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx2i32_align2
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = COPY $v8
+    %2:_(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 2 x s32>)
+    G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx2i32_align4
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx2i32_align4
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx2i32_align8
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx2i32_align8
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx2i32_align16
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx2i32_align16
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align256
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx2i32_align256
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx2i32_align256
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64_align4
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; RV32I-LABEL: name: vstore_nx2i64_align4
+    ; RV32I: liveins: $x10, $v8m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx2i64_align4
+    ; RV64I: liveins: $x10, $v8m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = COPY $v8m2
+    %2:_(<vscale x 16 x s8>) = G_BITCAST %1(<vscale x 2 x s64>)
+    G_STORE %2(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64_align8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; RV32I-LABEL: name: vstore_nx2i64_align8
+    ; RV32I: liveins: $x10, $v8m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx2i64_align8
+    ; RV64I: liveins: $x10, $v8m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = COPY $v8m2
+    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64_align16
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; RV32I-LABEL: name: vstore_nx2i64_align16
+    ; RV32I: liveins: $x10, $v8m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx2i64_align16
+    ; RV64I: liveins: $x10, $v8m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = COPY $v8m2
+    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64_align32
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; RV32I-LABEL: name: vstore_nx2i64_align32
+    ; RV32I: liveins: $x10, $v8m2
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx2i64_align32
+    ; RV64I: liveins: $x10, $v8m2
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = COPY $v8m2
+    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+    PseudoRET
+
+...
+---
+name:            vstore_nx1ptr
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx1ptr
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY2]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx1ptr
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY2]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x p0>) = COPY $v8
+    G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2ptr
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; RV32I-LABEL: name: vstore_nx2ptr
+    ; RV32I: liveins: $v8, $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY2]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx2ptr
+    ; RV64I: liveins: $v8, $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY2]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x p0>) = COPY $v8
+    G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8ptr
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+
+    ; RV32I-LABEL: name: vstore_nx8ptr
+    ; RV32I: liveins: $x10, $v8m4
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
+    ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY2]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+    ; RV32I-NEXT: PseudoRET
+    ;
+    ; RV64I-LABEL: name: vstore_nx8ptr
+    ; RV64I: liveins: $x10, $v8m4
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
+    ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY2]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+    ; RV64I-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x p0>) = COPY $v8m4
+    G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+    PseudoRET
+
+...

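A note on the store checks above: in every scalable-vector case the value operand is assigned to vrb, while the pointer arrives on gprb from $x10 and is copied into a vrb p0 before the G_STORE uses it, so regbankselect materializes one cross-bank COPY per access. The vector mapping is keyed on the type's known-minimum size. A minimal sketch of how a scalable LLT reports that size (illustrative only, not part of the patch; header path assumed for a recent LLVM tree):

    // Sketch: only the known-minimum bit count of a scalable type is known
    // statically, which is what getVRBValueMapping() is keyed on above.
    #include "llvm/CodeGenTypes/LowLevelType.h"
    #include <cassert>

    int main() {
      llvm::LLT NxV4S16 = llvm::LLT::scalable_vector(4, 16); // <vscale x 4 x s16>
      llvm::TypeSize Size = NxV4S16.getSizeInBits();
      assert(Size.isScalable() && Size.getKnownMinValue() == 64);
      return 0;
    }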
>From 63fcb5fc25fc2faf84a49af7d729e1835f0e8cb3 Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Tue, 30 Jul 2024 12:01:44 -0400
Subject: [PATCH 2/3] Address review feedback: drop the unreachable FP branch and
 reorder the rv32 s64 check

---
 .../Target/RISCV/GISel/RISCVRegisterBankInfo.cpp   | 14 ++++----------
 1 file changed, 4 insertions(+), 10 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index b9e52b7d8682d..6d7aeee83b0c6 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -314,16 +314,13 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     if (Ty.isVector()) {
       OpdsMapping[0] = getVRBValueMapping(Size.getKnownMinValue());
       OpdsMapping[1] = getVRBValueMapping(Size.getKnownMinValue());
-    } else if (isPreISelGenericFloatingPointOpcode(Opc)) {
-      OpdsMapping[0] = getFPValueMapping(Size.getFixedValue());
-      OpdsMapping[1] = getFPValueMapping(Size.getFixedValue());
     } else {
       OpdsMapping[0] = GPRValueMapping;
       OpdsMapping[1] = GPRValueMapping;
     }
     // Use FPR64 for s64 loads on rv32.
-    if (GPRSize == 32 && Ty.getSizeInBits().getKnownMinValue() == 64 &&
-        !Ty.isVector()) {
+    if (!Ty.isVector() && GPRSize == 32 &&
+        Ty.getSizeInBits().getKnownMinValue() == 64) {
       assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
       OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
       break;
@@ -347,17 +344,14 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
     if (Ty.isVector()) {
       OpdsMapping[0] = getVRBValueMapping(Size.getKnownMinValue());
       OpdsMapping[1] = getVRBValueMapping(Size.getKnownMinValue());
-    } else if (isPreISelGenericFloatingPointOpcode(Opc)) {
-      OpdsMapping[0] = getFPValueMapping(Size.getFixedValue());
-      OpdsMapping[1] = getFPValueMapping(Size.getFixedValue());
     } else {
       OpdsMapping[0] = GPRValueMapping;
       OpdsMapping[1] = GPRValueMapping;
     }
 
     // Use FPR64 for s64 stores on rv32.
-    if (GPRSize == 32 && Ty.getSizeInBits().getKnownMinValue() == 64 &&
-        !Ty.isVector()) {
+    if (!Ty.isVector() && GPRSize == 32 &&
+        Ty.getSizeInBits().getKnownMinValue() == 64) {
       assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
       OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
       break;

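For context on the hunks above: inside the G_LOAD/G_STORE cases Opc is always G_LOAD or G_STORE, never a generic floating-point opcode, so the isPreISelGenericFloatingPointOpcode() branch could not fire and is dropped. The rv32 s64 condition is only reordered so the vector exclusion is checked first; that exclusion matters because a scalar s64 and a <vscale x 1 x s64> report the same known-minimum size. A small sketch (not part of the patch; header path assumed):

    #include "llvm/CodeGenTypes/LowLevelType.h"
    #include <cassert>

    int main() {
      llvm::LLT S64 = llvm::LLT::scalar(64);
      llvm::LLT NxV1S64 = llvm::LLT::scalable_vector(1, 64); // <vscale x 1 x s64>
      // Both report a known-minimum size of 64 bits ...
      assert(S64.getSizeInBits().getKnownMinValue() == 64);
      assert(NxV1S64.getSizeInBits().getKnownMinValue() == 64);
      // ... so only the vector check keeps nxv1s64 out of the rv32 FPR64 path.
      assert(!S64.isVector() && NxV1S64.isVector());
      return 0;
    }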
>From 9934faddd43a26ec51683aeca9bce2fd848f55f2 Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Tue, 30 Jul 2024 13:43:54 -0400
Subject: [PATCH 3/3] Use TypeSize instead of unsigned when getting the value
 mapping

---
 llvm/lib/CodeGen/RegisterBankInfo.cpp         |   5 +-
 .../GlobalISel/regbankselect/rvv/load.mir     | 221 ++++++++-----
 .../GlobalISel/regbankselect/rvv/store.mir    | 308 ++++++++++--------
 3 files changed, 310 insertions(+), 224 deletions(-)

diff --git a/llvm/lib/CodeGen/RegisterBankInfo.cpp b/llvm/lib/CodeGen/RegisterBankInfo.cpp
index 72b07eb1902d9..00dcc1fbcd0c7 100644
--- a/llvm/lib/CodeGen/RegisterBankInfo.cpp
+++ b/llvm/lib/CodeGen/RegisterBankInfo.cpp
@@ -215,8 +215,9 @@ RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
       }
     }
 
-    unsigned Size = getSizeInBits(Reg, MRI, TRI);
-    const ValueMapping *ValMapping = &getValueMapping(0, Size, *CurRegBank);
+    TypeSize Size = getSizeInBits(Reg, MRI, TRI);
+    const ValueMapping *ValMapping =
+        &getValueMapping(0, Size.getKnownMinValue(), *CurRegBank);
     if (IsCopyLike) {
       if (!OperandsMapping[0]) {
         if (MI.isRegSequence()) {
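The RegisterBankInfo change above keeps the size as a TypeSize and passes its known-minimum value to getValueMapping(), instead of narrowing the size to a plain unsigned, so the default copy-like mapping also works when the register's type is a scalable vector. A minimal TypeSize sketch (illustrative only, not part of the patch):

    // Sketch: fixed vs. scalable TypeSize. A scalable size has no single
    // fixed value, so the call above asks for getKnownMinValue() explicitly.
    #include "llvm/Support/TypeSize.h"
    #include <cassert>

    int main() {
      llvm::TypeSize Fixed = llvm::TypeSize::getFixed(32);
      llvm::TypeSize Scalable = llvm::TypeSize::getScalable(64);
      assert(!Fixed.isScalable() && Fixed.getKnownMinValue() == 32);
      assert(Scalable.isScalable() && Scalable.getKnownMinValue() == 64);
      return 0;
    }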
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
index ce2bbb6441647..f542d1b7e6a5d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
@@ -250,8 +250,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 1 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
     $v8 = COPY %1(<vscale x 1 x s8>)
     PseudoRET implicit $v8
 
@@ -281,8 +282,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 2 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
     $v8 = COPY %1(<vscale x 2 x s8>)
     PseudoRET implicit $v8
 
@@ -312,8 +314,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 4 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
     $v8 = COPY %1(<vscale x 4 x s8>)
     PseudoRET implicit $v8
 
@@ -343,8 +346,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 8 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
     $v8 = COPY %1(<vscale x 8 x s8>)
     PseudoRET implicit $v8
 
@@ -374,8 +378,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
     ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
     ; RV64I-NEXT: PseudoRET implicit $v8m2
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
     $v8m2 = COPY %1(<vscale x 16 x s8>)
     PseudoRET implicit $v8m2
 
@@ -405,8 +410,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
     ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
     ; RV64I-NEXT: PseudoRET implicit $v8m4
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 32 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
     $v8m4 = COPY %1(<vscale x 32 x s8>)
     PseudoRET implicit $v8m4
 
@@ -436,8 +442,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
     ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
     ; RV64I-NEXT: PseudoRET implicit $v8m8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 64 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
     $v8m8 = COPY %1(<vscale x 64 x s8>)
     PseudoRET implicit $v8m8
 
@@ -467,8 +474,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 1 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
     $v8 = COPY %1(<vscale x 1 x s16>)
     PseudoRET implicit $v8
 
@@ -498,8 +506,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 2 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
     $v8 = COPY %1(<vscale x 2 x s16>)
     PseudoRET implicit $v8
 
@@ -529,8 +538,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
     $v8 = COPY %1(<vscale x 4 x s16>)
     PseudoRET implicit $v8
 
@@ -560,8 +570,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
     ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
     ; RV64I-NEXT: PseudoRET implicit $v8m2
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 8 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
     $v8m2 = COPY %1(<vscale x 8 x s16>)
     PseudoRET implicit $v8m2
 
@@ -591,8 +602,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
     ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
     ; RV64I-NEXT: PseudoRET implicit $v8m4
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 16 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
     $v8m4 = COPY %1(<vscale x 16 x s16>)
     PseudoRET implicit $v8m4
 
@@ -622,8 +634,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
     ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
     ; RV64I-NEXT: PseudoRET implicit $v8m8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 32 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
     $v8m8 = COPY %1(<vscale x 32 x s16>)
     PseudoRET implicit $v8m8
 
@@ -653,8 +666,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 1 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
     $v8 = COPY %1(<vscale x 1 x s32>)
     PseudoRET implicit $v8
 
@@ -684,8 +698,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
     $v8 = COPY %1(<vscale x 2 x s32>)
     PseudoRET implicit $v8
 
@@ -715,8 +730,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
     ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
     ; RV64I-NEXT: PseudoRET implicit $v8m2
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 4 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
     $v8m2 = COPY %1(<vscale x 4 x s32>)
     PseudoRET implicit $v8m2
 
@@ -746,8 +762,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
     ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
     ; RV64I-NEXT: PseudoRET implicit $v8m4
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 8 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
     $v8m4 = COPY %1(<vscale x 8 x s32>)
     PseudoRET implicit $v8m4
 
@@ -777,8 +794,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
     ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
     ; RV64I-NEXT: PseudoRET implicit $v8m8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 16 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
     $v8m8 = COPY %1(<vscale x 16 x s32>)
     PseudoRET implicit $v8m8
 
@@ -808,8 +826,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 1 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
     $v8 = COPY %1(<vscale x 1 x s64>)
     PseudoRET implicit $v8
 
@@ -839,8 +858,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
     ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
     ; RV64I-NEXT: PseudoRET implicit $v8m2
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 2 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
     PseudoRET implicit $v8m2
 
@@ -870,8 +890,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
     ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
     ; RV64I-NEXT: PseudoRET implicit $v8m4
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 4 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
     $v8m4 = COPY %1(<vscale x 4 x s64>)
     PseudoRET implicit $v8m4
 
@@ -901,8 +922,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
     ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
     ; RV64I-NEXT: PseudoRET implicit $v8m8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 8 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
     $v8m8 = COPY %1(<vscale x 8 x s64>)
     PseudoRET implicit $v8m8
 
@@ -932,8 +954,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
     ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
     ; RV64I-NEXT: PseudoRET implicit $v8m2
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
     $v8m2 = COPY %1(<vscale x 16 x s8>)
     PseudoRET implicit $v8m2
 
@@ -963,8 +986,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
     ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
     ; RV64I-NEXT: PseudoRET implicit $v8m2
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
     $v8m2 = COPY %1(<vscale x 16 x s8>)
     PseudoRET implicit $v8m2
 
@@ -994,8 +1018,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
     ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
     ; RV64I-NEXT: PseudoRET implicit $v8m2
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
     $v8m2 = COPY %1(<vscale x 16 x s8>)
     PseudoRET implicit $v8m2
 
@@ -1025,8 +1050,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
     ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
     ; RV64I-NEXT: PseudoRET implicit $v8m2
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
     $v8m2 = COPY %1(<vscale x 16 x s8>)
     PseudoRET implicit $v8m2
 
@@ -1058,9 +1084,10 @@ body:             |
     ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
     ; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
-    %1:_(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)
+    %0:gprb(p0) = COPY $x10
+    %3:vrb(p0) = COPY %0(p0)
+    %2:vrb(<vscale x 8 x s8>) = G_LOAD %3(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+    %1:vrb(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)
     $v8 = COPY %1(<vscale x 4 x s16>)
     PseudoRET implicit $v8
 
@@ -1090,8 +1117,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
     $v8 = COPY %1(<vscale x 4 x s16>)
     PseudoRET implicit $v8
 
@@ -1121,8 +1149,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
     $v8 = COPY %1(<vscale x 4 x s16>)
     PseudoRET implicit $v8
 
@@ -1152,8 +1181,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
     $v8 = COPY %1(<vscale x 4 x s16>)
     PseudoRET implicit $v8
 
@@ -1183,8 +1213,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
     $v8 = COPY %1(<vscale x 4 x s16>)
     PseudoRET implicit $v8
 
@@ -1216,9 +1247,10 @@ body:             |
     ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
     ; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
-    %1:_(<vscale x 2 x s32>) = G_BITCAST %2(<vscale x 8 x s8>)
+    %0:gprb(p0) = COPY $x10
+    %3:vrb(p0) = COPY %0(p0)
+    %2:vrb(<vscale x 8 x s8>) = G_LOAD %3(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+    %1:vrb(<vscale x 2 x s32>) = G_BITCAST %2(<vscale x 8 x s8>)
     $v8 = COPY %1(<vscale x 2 x s32>)
     PseudoRET implicit $v8
 
@@ -1248,8 +1280,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
     $v8 = COPY %1(<vscale x 2 x s32>)
     PseudoRET implicit $v8
 
@@ -1279,8 +1312,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
     $v8 = COPY %1(<vscale x 2 x s32>)
     PseudoRET implicit $v8
 
@@ -1310,8 +1344,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
     $v8 = COPY %1(<vscale x 2 x s32>)
     PseudoRET implicit $v8
 
@@ -1341,8 +1376,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
     $v8 = COPY %1(<vscale x 2 x s32>)
     PseudoRET implicit $v8
 
@@ -1374,9 +1410,10 @@ body:             |
     ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
     ; RV64I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
     ; RV64I-NEXT: PseudoRET implicit $v8m2
-    %0:_(p0) = COPY $x10
-    %2:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
-    %1:_(<vscale x 2 x s64>) = G_BITCAST %2(<vscale x 16 x s8>)
+    %0:gprb(p0) = COPY $x10
+    %3:vrb(p0) = COPY %0(p0)
+    %2:vrb(<vscale x 16 x s8>) = G_LOAD %3(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+    %1:vrb(<vscale x 2 x s64>) = G_BITCAST %2(<vscale x 16 x s8>)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
     PseudoRET implicit $v8m2
 
@@ -1406,8 +1443,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
     ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
     ; RV64I-NEXT: PseudoRET implicit $v8m2
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 2 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
     PseudoRET implicit $v8m2
 
@@ -1437,8 +1475,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
     ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
     ; RV64I-NEXT: PseudoRET implicit $v8m2
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 2 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
     PseudoRET implicit $v8m2
 
@@ -1468,8 +1507,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
     ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
     ; RV64I-NEXT: PseudoRET implicit $v8m2
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 2 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
     $v8m2 = COPY %1(<vscale x 2 x s64>)
     PseudoRET implicit $v8m2
 
@@ -1499,8 +1539,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 1 x p0>) = G_LOAD %2(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
     $v8 = COPY %1(<vscale x 1 x p0>)
     PseudoRET implicit $v8
 
@@ -1530,8 +1571,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
     ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
     ; RV64I-NEXT: PseudoRET implicit $v8
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 2 x p0>) = G_LOAD %2(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
     $v8 = COPY %1(<vscale x 2 x p0>)
     PseudoRET implicit $v8
 
@@ -1561,8 +1603,9 @@ body:             |
     ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
     ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
     ; RV64I-NEXT: PseudoRET implicit $v8m4
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 8 x p0>) = G_LOAD %2(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
     $v8m4 = COPY %1(<vscale x 8 x p0>)
     PseudoRET implicit $v8m4
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
index 6c8c3f73fb580..83cf77af25765 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
@@ -5,7 +5,6 @@
 # RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
 # RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
 # RUN:   -o - | FileCheck -check-prefix=RV64I %s
-
 --- |
 
   define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) #0 {
@@ -251,9 +250,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY2]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 1 x s8>) = COPY $v8
-    G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x s8>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 1 x s8>), %2(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
     PseudoRET
 
 ...
@@ -282,9 +282,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY2]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s8>) = COPY $v8
-    G_STORE %1(<vscale x 2 x s8>), %0(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s8>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 2 x s8>), %2(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
     PseudoRET
 
 ...
@@ -313,9 +314,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY2]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s8>) = COPY $v8
-    G_STORE %1(<vscale x 4 x s8>), %0(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s8>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 4 x s8>), %2(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
     PseudoRET
 
 ...
@@ -344,9 +346,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x s8>) = COPY $v8
-    G_STORE %1(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x s8>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 8 x s8>), %2(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
     PseudoRET
 
 ...
@@ -375,9 +378,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s8>) = COPY $v8m2
-    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
     PseudoRET
 
 ...
@@ -406,9 +410,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY2]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 32 x s8>) = COPY $v8m4
-    G_STORE %1(<vscale x 32 x s8>), %0(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 32 x s8>), %2(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
     PseudoRET
 
 ...
@@ -437,9 +442,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY2]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 64 x s8>) = COPY $v8m8
-    G_STORE %1(<vscale x 64 x s8>), %0(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 64 x s8>) = COPY $v8m8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 64 x s8>), %2(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
     PseudoRET
 
 ...
@@ -468,9 +474,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY2]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 1 x s16>) = COPY $v8
-    G_STORE %1(<vscale x 1 x s16>), %0(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x s16>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 1 x s16>), %2(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
     PseudoRET
 
 ...
@@ -499,9 +506,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY2]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s16>) = COPY $v8
-    G_STORE %1(<vscale x 2 x s16>), %0(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s16>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 2 x s16>), %2(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
     PseudoRET
 
 ...
@@ -530,9 +538,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s16>) = COPY $v8
-    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
     PseudoRET
 
 ...
@@ -561,9 +570,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY2]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x s16>) = COPY $v8m2
-    G_STORE %1(<vscale x 8 x s16>), %0(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 8 x s16>), %2(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
     PseudoRET
 
 ...
@@ -592,9 +602,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY2]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s16>) = COPY $v8m4
-    G_STORE %1(<vscale x 16 x s16>), %0(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 16 x s16>), %2(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
     PseudoRET
 
 ...
@@ -623,9 +634,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY2]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 32 x s16>) = COPY $v8m8
-    G_STORE %1(<vscale x 32 x s16>), %0(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 32 x s16>) = COPY $v8m8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 32 x s16>), %2(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
     PseudoRET
 
 ...
@@ -654,9 +666,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY2]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 1 x s32>) = COPY $v8
-    G_STORE %1(<vscale x 1 x s32>), %0(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x s32>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 1 x s32>), %2(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
     PseudoRET
 
 ...
@@ -685,9 +698,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s32>) = COPY $v8
-    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
     PseudoRET
 
 ...
@@ -716,9 +730,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY2]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s32>) = COPY $v8m2
-    G_STORE %1(<vscale x 4 x s32>), %0(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 4 x s32>), %2(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
     PseudoRET
 
 ...
@@ -747,9 +762,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY2]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x s32>) = COPY $v8m4
-    G_STORE %1(<vscale x 8 x s32>), %0(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 8 x s32>), %2(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
     PseudoRET
 
 ...
@@ -778,9 +794,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY2]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s32>) = COPY $v8m8
-    G_STORE %1(<vscale x 16 x s32>), %0(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s32>) = COPY $v8m8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 16 x s32>), %2(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
     PseudoRET
 
 ...
@@ -809,9 +826,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY2]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 1 x s64>) = COPY $v8
-    G_STORE %1(<vscale x 1 x s64>), %0(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x s64>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 1 x s64>), %2(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
     PseudoRET
 
 ...
@@ -840,9 +858,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s64>) = COPY $v8m2
-    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 2 x s64>), %2(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
     PseudoRET
 
 ...
@@ -871,9 +890,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY2]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s64>) = COPY $v8m4
-    G_STORE %1(<vscale x 4 x s64>), %0(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s64>) = COPY $v8m4
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 4 x s64>), %2(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
     PseudoRET
 
 ...
@@ -902,9 +922,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY2]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x s64>) = COPY $v8m8
-    G_STORE %1(<vscale x 8 x s64>), %0(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x s64>) = COPY $v8m8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 8 x s64>), %2(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
     PseudoRET
 
 ...
@@ -933,9 +954,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s8>) = COPY $v8m2
-    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
     PseudoRET
 
 ...
@@ -964,9 +986,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s8>) = COPY $v8m2
-    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
     PseudoRET
 
 ...
@@ -995,9 +1018,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s8>) = COPY $v8m2
-    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
     PseudoRET
 
 ...
@@ -1026,9 +1050,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s8>) = COPY $v8m2
-    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
     PseudoRET
 
 ...
@@ -1059,10 +1084,11 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s16>) = COPY $v8
-    %2:_(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 4 x s16>)
-    G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $v8
+    %2:vrb(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 4 x s16>)
+    %3:vrb(p0) = COPY %0(p0)
+    G_STORE %2(<vscale x 8 x s8>), %3(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
     PseudoRET
 
 ...
@@ -1091,9 +1117,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s16>) = COPY $v8
-    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
     PseudoRET
 
 ...
@@ -1122,9 +1149,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s16>) = COPY $v8
-    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
     PseudoRET
 
 ...
@@ -1153,9 +1181,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s16>) = COPY $v8
-    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
     PseudoRET
 
 ...
@@ -1184,9 +1213,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s16>) = COPY $v8
-    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
     PseudoRET
 
 ...
@@ -1217,10 +1247,11 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s32>) = COPY $v8
-    %2:_(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 2 x s32>)
-    G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $v8
+    %2:vrb(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 2 x s32>)
+    %3:vrb(p0) = COPY %0(p0)
+    G_STORE %2(<vscale x 8 x s8>), %3(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
     PseudoRET
 
 ...
@@ -1249,9 +1280,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s32>) = COPY $v8
-    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
     PseudoRET
 
 ...
@@ -1280,9 +1312,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s32>) = COPY $v8
-    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
     PseudoRET
 
 ...
@@ -1311,9 +1344,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s32>) = COPY $v8
-    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
     PseudoRET
 
 ...
@@ -1342,9 +1376,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s32>) = COPY $v8
-    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
     PseudoRET
 
 ...
@@ -1375,10 +1410,11 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s64>) = COPY $v8m2
-    %2:_(<vscale x 16 x s8>) = G_BITCAST %1(<vscale x 2 x s64>)
-    G_STORE %2(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    %2:vrb(<vscale x 16 x s8>) = G_BITCAST %1(<vscale x 2 x s64>)
+    %3:vrb(p0) = COPY %0(p0)
+    G_STORE %2(<vscale x 16 x s8>), %3(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
     PseudoRET
 
 ...
@@ -1407,9 +1443,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s64>) = COPY $v8m2
-    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 2 x s64>), %2(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
     PseudoRET
 
 ...
@@ -1438,9 +1475,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s64>) = COPY $v8m2
-    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 2 x s64>), %2(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
     PseudoRET
 
 ...
@@ -1469,9 +1507,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s64>) = COPY $v8m2
-    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 2 x s64>), %2(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
     PseudoRET
 
 ...
@@ -1500,9 +1539,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY2]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 1 x p0>) = COPY $v8
-    G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x p0>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 1 x p0>), %2(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
     PseudoRET
 
 ...
@@ -1531,9 +1571,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY2]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x p0>) = COPY $v8
-    G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x p0>) = COPY $v8
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 2 x p0>), %2(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
     PseudoRET
 
 ...
@@ -1562,9 +1603,10 @@ body:             |
     ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
     ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY2]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
     ; RV64I-NEXT: PseudoRET
-    %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x p0>) = COPY $v8m4
-    G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x p0>) = COPY $v8m4
+    %2:vrb(p0) = COPY %0(p0)
+    G_STORE %1(<vscale x 8 x p0>), %2(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
     PseudoRET
 
 ...
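
A note on the checks above: each updated store test follows the same shape, distilled in the illustrative MIR sketch below (the register numbers and the <vscale x 2 x s32> element type are arbitrary examples, not an additional test case). The scalable-vector value lives on the vector bank, and the pointer, which arrives as a gprb(p0) copy of $x10, is re-copied onto vrb before feeding the G_STORE:

    ; Illustrative sketch only; mirrors the expected regbankselect output checked above.
    %0:gprb(p0) = COPY $x10                  ; pointer arrives in a GPR
    %1:vrb(<vscale x 2 x s32>) = COPY $v8    ; scalable-vector value on the vector bank
    %2:vrb(p0) = COPY %0(p0)                 ; cross-bank copy of the pointer
    G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>))
    PseudoRET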


