[llvm] [GISEL][RISCV] RegBank Scalable Vector Load/Store (PR #99932)
Jiahan Xie via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 22 13:49:55 PDT 2024
https://github.com/jiahanxie353 updated https://github.com/llvm/llvm-project/pull/99932
From 28c2cc5bff27216785759c0080e724108026882b Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Mon, 22 Jul 2024 16:44:15 -0400
Subject: [PATCH] register bank select for scalable vector load/store
---
.../RISCV/GISel/RISCVRegisterBankInfo.cpp | 33 +-
.../GlobalISel/regbankselect/rvv/load.mir | 1569 ++++++++++++++++
.../GlobalISel/regbankselect/rvv/store.mir | 1570 +++++++++++++++++
3 files changed, 3166 insertions(+), 6 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index d25e96525399e..2d812ab38a30e 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -355,10 +355,20 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
switch (Opc) {
case TargetOpcode::G_LOAD: {
LLT Ty = MRI.getType(MI.getOperand(0).getReg());
- OpdsMapping[0] = GPRValueMapping;
- OpdsMapping[1] = GPRValueMapping;
+ TypeSize Size = Ty.getSizeInBits();
+ if (Ty.isVector()) {
+ OpdsMapping[0] = getVRBValueMapping(Size.getKnownMinValue());
+ OpdsMapping[1] = getVRBValueMapping(Size.getKnownMinValue());
+ } else if (isPreISelGenericFloatingPointOpcode(Opc)) {
+ OpdsMapping[0] = getFPValueMapping(Size.getFixedValue());
+ OpdsMapping[1] = getFPValueMapping(Size.getFixedValue());
+ } else {
+ OpdsMapping[0] = GPRValueMapping;
+ OpdsMapping[1] = GPRValueMapping;
+ }
// Use FPR64 for s64 loads on rv32.
- if (GPRSize == 32 && Ty.getSizeInBits() == 64) {
+ if (GPRSize == 32 && Ty.getSizeInBits().getKnownMinValue() == 64 &&
+ !Ty.isVector()) {
assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
break;
@@ -378,10 +388,21 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
case TargetOpcode::G_STORE: {
LLT Ty = MRI.getType(MI.getOperand(0).getReg());
- OpdsMapping[0] = GPRValueMapping;
- OpdsMapping[1] = GPRValueMapping;
+ TypeSize Size = Ty.getSizeInBits();
+ if (Ty.isVector()) {
+ OpdsMapping[0] = getVRBValueMapping(Size.getKnownMinValue());
+ OpdsMapping[1] = getVRBValueMapping(Size.getKnownMinValue());
+ } else if (isPreISelGenericFloatingPointOpcode(Opc)) {
+ OpdsMapping[0] = getFPValueMapping(Size.getFixedValue());
+ OpdsMapping[1] = getFPValueMapping(Size.getFixedValue());
+ } else {
+ OpdsMapping[0] = GPRValueMapping;
+ OpdsMapping[1] = GPRValueMapping;
+ }
+
// Use FPR64 for s64 stores on rv32.
- if (GPRSize == 32 && Ty.getSizeInBits() == 64) {
+ if (GPRSize == 32 && Ty.getSizeInBits().getKnownMinValue() == 64 &&
+ !Ty.isVector()) {
assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
break;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
new file mode 100644
index 0000000000000..ce2bbb6441647
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
@@ -0,0 +1,1569 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+--- |
+
+ define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) #0 {
+ %va = load <vscale x 1 x i8>, ptr %pa, align 1
+ ret <vscale x 1 x i8> %va
+ }
+
+ define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) #0 {
+ %va = load <vscale x 2 x i8>, ptr %pa, align 2
+ ret <vscale x 2 x i8> %va
+ }
+
+ define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) #0 {
+ %va = load <vscale x 4 x i8>, ptr %pa, align 4
+ ret <vscale x 4 x i8> %va
+ }
+
+ define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) #0 {
+ %va = load <vscale x 8 x i8>, ptr %pa, align 8
+ ret <vscale x 8 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 16
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 32 x i8> @vload_nx32i8(ptr %pa) #0 {
+ %va = load <vscale x 32 x i8>, ptr %pa, align 32
+ ret <vscale x 32 x i8> %va
+ }
+
+ define <vscale x 64 x i8> @vload_nx64i8(ptr %pa) #0 {
+ %va = load <vscale x 64 x i8>, ptr %pa, align 64
+ ret <vscale x 64 x i8> %va
+ }
+
+ define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) #0 {
+ %va = load <vscale x 1 x i16>, ptr %pa, align 2
+ ret <vscale x 1 x i16> %va
+ }
+
+ define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) #0 {
+ %va = load <vscale x 2 x i16>, ptr %pa, align 4
+ ret <vscale x 2 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 8
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) #0 {
+ %va = load <vscale x 8 x i16>, ptr %pa, align 16
+ ret <vscale x 8 x i16> %va
+ }
+
+ define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) #0 {
+ %va = load <vscale x 16 x i16>, ptr %pa, align 32
+ ret <vscale x 16 x i16> %va
+ }
+
+ define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) #0 {
+ %va = load <vscale x 32 x i16>, ptr %pa, align 64
+ ret <vscale x 32 x i16> %va
+ }
+
+ define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) #0 {
+ %va = load <vscale x 1 x i32>, ptr %pa, align 4
+ ret <vscale x 1 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 8
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) #0 {
+ %va = load <vscale x 4 x i32>, ptr %pa, align 16
+ ret <vscale x 4 x i32> %va
+ }
+
+ define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) #0 {
+ %va = load <vscale x 8 x i32>, ptr %pa, align 32
+ ret <vscale x 8 x i32> %va
+ }
+
+ define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) #0 {
+ %va = load <vscale x 16 x i32>, ptr %pa, align 64
+ ret <vscale x 16 x i32> %va
+ }
+
+ define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) #0 {
+ %va = load <vscale x 1 x i64>, ptr %pa, align 8
+ ret <vscale x 1 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 16
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) #0 {
+ %va = load <vscale x 4 x i64>, ptr %pa, align 32
+ ret <vscale x 4 x i64> %va
+ }
+
+ define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) #0 {
+ %va = load <vscale x 8 x i64>, ptr %pa, align 64
+ ret <vscale x 8 x i64> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nx16i8_align1(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 1
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nx16i8_align2(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 2
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nx16i8_align16(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 16
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nx16i8_align64(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 64
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nx4i16_align1(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 1
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nx4i16_align2(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 2
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nx4i16_align4(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 4
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nx4i16_align8(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 8
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nx4i16_align16(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 16
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nx2i32_align2(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 2
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nx2i32_align4(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 4
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nx2i32_align8(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 8
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nx2i32_align16(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 16
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nx2i32_align256(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 256
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nx2i64_align4(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 4
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nx2i64_align8(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 8
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nx2i64_align16(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 16
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nx2i64_align32(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 32
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 1 x ptr> @vload_nx1ptr(ptr %pa) #0 {
+ %va = load <vscale x 1 x ptr>, ptr %pa, align 4
+ ret <vscale x 1 x ptr> %va
+ }
+
+ define <vscale x 2 x ptr> @vload_nx2ptr(ptr %pa) #0 {
+ %va = load <vscale x 2 x ptr>, ptr %pa, align 8
+ ret <vscale x 2 x ptr> %va
+ }
+
+ define <vscale x 8 x ptr> @vload_nx8ptr(ptr %pa) #0 {
+ %va = load <vscale x 8 x ptr>, ptr %pa, align 32
+ ret <vscale x 8 x ptr> %va
+ }
+
+ attributes #0 = { "target-features"="+v" }
+
+...
+---
+name: vload_nx1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx1i8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx1i8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx4i8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx8i8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx8i8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx16i8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx16i8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx32i8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vload_nx32i8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nx64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx64i8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vload_nx64i8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nx1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx1i16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx1i16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx4i16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx8i16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx8i16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx16i16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vload_nx16i16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nx32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx32i16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vload_nx32i16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nx1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx1i32
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx1i32
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i32
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i32
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i32
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx4i32
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx8i32
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vload_nx8i32
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nx16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx16i32
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vload_nx16i32
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nx1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx1i64
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx1i64
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i64
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx2i64
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i64
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vload_nx4i64
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nx8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx8i64
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vload_nx8i64
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nx16i8_align1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx16i8_align1
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx16i8_align1
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx16i8_align2
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx16i8_align2
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx16i8_align2
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx16i8_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx16i8_align16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx16i8_align16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx16i8_align64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx16i8_align64
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx16i8_align64
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx4i16_align1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i16_align1
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx4i16_align1
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ %1:_(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i16_align2
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i16_align2
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx4i16_align2
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i16_align4
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i16_align4
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx4i16_align4
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i16_align8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i16_align8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx4i16_align8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i16_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i16_align16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx4i16_align16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i32_align2
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i32_align2
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i32_align2
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ %1:_(<vscale x 2 x s32>) = G_BITCAST %2(<vscale x 8 x s8>)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i32_align4
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i32_align4
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i32_align4
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i32_align8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i32_align8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i32_align8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i32_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i32_align16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i32_align16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i32_align256
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i32_align256
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i32_align256
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i64_align4
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i64_align4
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx2i64_align4
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %2:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ %1:_(<vscale x 2 x s64>) = G_BITCAST %2(<vscale x 16 x s8>)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx2i64_align8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i64_align8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx2i64_align8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx2i64_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i64_align16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx2i64_align16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx2i64_align32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i64_align32
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx2i64_align32
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx1ptr
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx1ptr
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx1ptr
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x p0>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2ptr
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2ptr
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2ptr
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x p0>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx8ptr
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx8ptr
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vload_nx8ptr
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 8 x p0>)
+ PseudoRET implicit $v8m4
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
new file mode 100644
index 0000000000000..6c8c3f73fb580
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
@@ -0,0 +1,1570 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+
+--- |
+
+ define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) #0 {
+ store <vscale x 1 x i8> %b, ptr %pa, align 1
+ ret void
+ }
+
+ define void @vstore_nx2i8(ptr %pa, <vscale x 2 x i8> %b) #0 {
+ store <vscale x 2 x i8> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx4i8(ptr %pa, <vscale x 4 x i8> %b) #0 {
+ store <vscale x 4 x i8> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx8i8(ptr %pa, <vscale x 8 x i8> %b) #0 {
+ store <vscale x 8 x i8> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx16i8(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx32i8(ptr %pa, <vscale x 32 x i8> %b) #0 {
+ store <vscale x 32 x i8> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx64i8(ptr %pa, <vscale x 64 x i8> %b) #0 {
+ store <vscale x 64 x i8> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx1i16(ptr %pa, <vscale x 1 x i16> %b) #0 {
+ store <vscale x 1 x i16> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx2i16(ptr %pa, <vscale x 2 x i16> %b) #0 {
+ store <vscale x 2 x i16> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx4i16(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx8i16(ptr %pa, <vscale x 8 x i16> %b) #0 {
+ store <vscale x 8 x i16> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx16i16(ptr %pa, <vscale x 16 x i16> %b) #0 {
+ store <vscale x 16 x i16> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx32i16(ptr %pa, <vscale x 32 x i16> %b) #0 {
+ store <vscale x 32 x i16> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx1i32(ptr %pa, <vscale x 1 x i32> %b) #0 {
+ store <vscale x 1 x i32> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2i32(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx4i32(ptr %pa, <vscale x 4 x i32> %b) #0 {
+ store <vscale x 4 x i32> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx8i32(ptr %pa, <vscale x 8 x i32> %b) #0 {
+ store <vscale x 8 x i32> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx16i32(ptr %pa, <vscale x 16 x i32> %b) #0 {
+ store <vscale x 16 x i32> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx1i64(ptr %pa, <vscale x 1 x i64> %b) #0 {
+ store <vscale x 1 x i64> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx2i64(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx4i64(ptr %pa, <vscale x 4 x i64> %b) #0 {
+ store <vscale x 4 x i64> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx8i64(ptr %pa, <vscale x 8 x i64> %b) #0 {
+ store <vscale x 8 x i64> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx16i8_align1(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 1
+ ret void
+ }
+
+ define void @vstore_nx16i8_align2(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx16i8_align16(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx16i8_align64(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx4i16_align1(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 1
+ ret void
+ }
+
+ define void @vstore_nx4i16_align2(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx4i16_align4(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx4i16_align8(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx4i16_align16(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx2i32_align2(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx2i32_align4(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2i32_align8(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx2i32_align16(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx2i32_align256(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 256
+ ret void
+ }
+
+ define void @vstore_nx2i64_align4(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2i64_align8(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx2i64_align16(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx2i64_align32(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) #0 {
+ store <vscale x 1 x ptr> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) #0 {
+ store <vscale x 2 x ptr> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) #0 {
+ store <vscale x 8 x ptr> %b, ptr %pa, align 32
+ ret void
+ }
+
+ attributes #0 = { "target-features"="+v" }
+
+...
+---
+name: vstore_nx1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx1i8
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY2]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx1i8
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY2]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i8
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY2]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i8
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY2]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s8>), %0(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx4i8
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY2]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i8
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY2]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s8>), %0(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx8i8
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx8i8
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx16i8
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx16i8
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; RV32I-LABEL: name: vstore_nx32i8
+ ; RV32I: liveins: $x10, $v8m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY2]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx32i8
+ ; RV64I: liveins: $x10, $v8m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY2]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s8>) = COPY $v8m4
+ G_STORE %1(<vscale x 32 x s8>), %0(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; RV32I-LABEL: name: vstore_nx64i8
+ ; RV32I: liveins: $x10, $v8m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY2]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx64i8
+ ; RV64I: liveins: $x10, $v8m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY2]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 64 x s8>) = COPY $v8m8
+ G_STORE %1(<vscale x 64 x s8>), %0(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx1i16
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY2]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx1i16
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY2]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s16>), %0(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i16
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY2]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i16
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY2]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s16>), %0(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx4i16
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i16
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx8i16
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY2]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx8i16
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY2]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s16>) = COPY $v8m2
+ G_STORE %1(<vscale x 8 x s16>), %0(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; RV32I-LABEL: name: vstore_nx16i16
+ ; RV32I: liveins: $x10, $v8m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY2]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx16i16
+ ; RV64I: liveins: $x10, $v8m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY2]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s16>) = COPY $v8m4
+ G_STORE %1(<vscale x 16 x s16>), %0(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; RV32I-LABEL: name: vstore_nx32i16
+ ; RV32I: liveins: $x10, $v8m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY2]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx32i16
+ ; RV64I: liveins: $x10, $v8m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY2]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s16>) = COPY $v8m8
+ G_STORE %1(<vscale x 32 x s16>), %0(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx1i32
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY2]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx1i32
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY2]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s32>), %0(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i32
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i32
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx4i32
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY2]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i32
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY2]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s32>) = COPY $v8m2
+ G_STORE %1(<vscale x 4 x s32>), %0(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; RV32I-LABEL: name: vstore_nx8i32
+ ; RV32I: liveins: $x10, $v8m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY2]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx8i32
+ ; RV64I: liveins: $x10, $v8m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY2]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s32>) = COPY $v8m4
+ G_STORE %1(<vscale x 8 x s32>), %0(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; RV32I-LABEL: name: vstore_nx16i32
+ ; RV32I: liveins: $x10, $v8m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY2]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx16i32
+ ; RV64I: liveins: $x10, $v8m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY2]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s32>) = COPY $v8m8
+ G_STORE %1(<vscale x 16 x s32>), %0(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx1i64
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY2]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx1i64
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY2]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s64>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s64>), %0(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx2i64
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i64
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; RV32I-LABEL: name: vstore_nx4i64
+ ; RV32I: liveins: $x10, $v8m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY2]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i64
+ ; RV64I: liveins: $x10, $v8m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY2]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s64>) = COPY $v8m4
+ G_STORE %1(<vscale x 4 x s64>), %0(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; RV32I-LABEL: name: vstore_nx8i64
+ ; RV32I: liveins: $x10, $v8m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY2]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx8i64
+ ; RV64I: liveins: $x10, $v8m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY2]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s64>) = COPY $v8m8
+ G_STORE %1(<vscale x 8 x s64>), %0(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx16i8_align1
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx16i8_align1
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align2
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx16i8_align2
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx16i8_align2
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx16i8_align16
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx16i8_align16
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx16i8_align64
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx16i8_align64
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx4i16_align1
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i16_align1
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ %2:_(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 4 x s16>)
+ G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align2
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx4i16_align2
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i16_align2
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align4
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx4i16_align4
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i16_align4
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx4i16_align8
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i16_align8
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx4i16_align16
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i16_align16
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align2
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i32_align2
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i32_align2
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ %2:_(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 2 x s32>)
+ G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align4
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i32_align4
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i32_align4
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i32_align8
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i32_align8
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i32_align16
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i32_align16
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align256
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i32_align256
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i32_align256
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align4
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx2i64_align4
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i64_align4
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ %2:_(<vscale x 16 x s8>) = G_BITCAST %1(<vscale x 2 x s64>)
+ G_STORE %2(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx2i64_align8
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i64_align8
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx2i64_align16
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i64_align16
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx2i64_align32
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i64_align32
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ PseudoRET
+
+...
+---
+name: vstore_nx1ptr
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx1ptr
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY2]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx1ptr
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY2]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x p0>) = COPY $v8
+ G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2ptr
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2ptr
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY2]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2ptr
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY2]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x p0>) = COPY $v8
+ G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8ptr
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; RV32I-LABEL: name: vstore_nx8ptr
+ ; RV32I: liveins: $x10, $v8m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY2]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx8ptr
+ ; RV64I: liveins: $x10, $v8m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY2]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x p0>) = COPY $v8m4
+ G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ PseudoRET
+
+...