[llvm] [GISEL][RISCV] RegBank Scalable Vector Load/Store (PR #99932)
Jiahan Xie via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 30 20:35:03 PDT 2024
https://github.com/jiahanxie353 updated https://github.com/llvm/llvm-project/pull/99932
From 207d8baa1d1639afb93ef1124597496a25683e84 Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Mon, 22 Jul 2024 16:44:15 -0400
Subject: [PATCH 1/6] register bank select for scalable vector load/store
---
.../RISCV/GISel/RISCVRegisterBankInfo.cpp | 33 +-
.../GlobalISel/regbankselect/rvv/load.mir | 1569 ++++++++++++++++
.../GlobalISel/regbankselect/rvv/store.mir | 1570 +++++++++++++++++
3 files changed, 3166 insertions(+), 6 deletions(-)
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 43bbc8589e7e2..b9e52b7d8682d 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -310,10 +310,20 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
switch (Opc) {
case TargetOpcode::G_LOAD: {
LLT Ty = MRI.getType(MI.getOperand(0).getReg());
- OpdsMapping[0] = GPRValueMapping;
- OpdsMapping[1] = GPRValueMapping;
+ TypeSize Size = Ty.getSizeInBits();
+ if (Ty.isVector()) {
+ OpdsMapping[0] = getVRBValueMapping(Size.getKnownMinValue());
+ OpdsMapping[1] = getVRBValueMapping(Size.getKnownMinValue());
+ } else if (isPreISelGenericFloatingPointOpcode(Opc)) {
+ OpdsMapping[0] = getFPValueMapping(Size.getFixedValue());
+ OpdsMapping[1] = getFPValueMapping(Size.getFixedValue());
+ } else {
+ OpdsMapping[0] = GPRValueMapping;
+ OpdsMapping[1] = GPRValueMapping;
+ }
// Use FPR64 for s64 loads on rv32.
- if (GPRSize == 32 && Ty.getSizeInBits() == 64) {
+ if (GPRSize == 32 && Ty.getSizeInBits().getKnownMinValue() == 64 &&
+ !Ty.isVector()) {
assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
break;
@@ -333,10 +343,21 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
}
case TargetOpcode::G_STORE: {
LLT Ty = MRI.getType(MI.getOperand(0).getReg());
- OpdsMapping[0] = GPRValueMapping;
- OpdsMapping[1] = GPRValueMapping;
+ TypeSize Size = Ty.getSizeInBits();
+ if (Ty.isVector()) {
+ OpdsMapping[0] = getVRBValueMapping(Size.getKnownMinValue());
+ OpdsMapping[1] = getVRBValueMapping(Size.getKnownMinValue());
+ } else if (isPreISelGenericFloatingPointOpcode(Opc)) {
+ OpdsMapping[0] = getFPValueMapping(Size.getFixedValue());
+ OpdsMapping[1] = getFPValueMapping(Size.getFixedValue());
+ } else {
+ OpdsMapping[0] = GPRValueMapping;
+ OpdsMapping[1] = GPRValueMapping;
+ }
+
// Use FPR64 for s64 stores on rv32.
- if (GPRSize == 32 && Ty.getSizeInBits() == 64) {
+ if (GPRSize == 32 && Ty.getSizeInBits().getKnownMinValue() == 64 &&
+ !Ty.isVector()) {
assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
break;
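
[Editorial note, not part of the patch: a minimal standalone sketch of the operand-bank decision the two G_LOAD/G_STORE hunks above add. The enum and helper name below are invented for illustration; only the order of the checks (vector -> VRB keyed by the known-minimum size, FP opcode -> FPR, rv32 scalar s64 -> FPR64, otherwise GPR) mirrors the patch.]

// Standalone sketch (not LLVM code): models the bank choice made in
// RISCVRegisterBankInfo::getInstrMapping() for load/store value operands.
// BankKind and pickValueBank are hypothetical names for illustration only.
#include <cassert>
#include <cstdint>
#include <iostream>

enum class BankKind { GPR, FPR, VRB };

struct TypeInfo {
  bool IsVector;         // scalable (or fixed) vector type
  uint64_t KnownMinBits; // what TypeSize::getKnownMinValue() would report
};

// Mirrors the order of checks in the patch:
//  1. vectors always map to the vector bank (VRB), keyed by the known-minimum
//     size so <vscale x N x sM> types work;
//  2. FP opcodes use the FP bank with a fixed size;
//  3. scalar s64 on rv32 (GPRSize == 32) goes to FPR64, which is why the rv32
//     special case now also requires !Ty.isVector();
//  4. everything else stays on GPR.
BankKind pickValueBank(const TypeInfo &Ty, bool IsFPOpcode, unsigned GPRSize) {
  if (Ty.IsVector)
    return BankKind::VRB;
  if (IsFPOpcode)
    return BankKind::FPR;
  if (GPRSize == 32 && Ty.KnownMinBits == 64)
    return BankKind::FPR; // s64 load/store on rv32 uses FPR64 (needs D ext.)
  return BankKind::GPR;
}

int main() {
  // <vscale x 1 x s64> on rv32: the vector check wins; the 64-bit rv32
  // special case must not fire, hence the added !Ty.isVector() guard.
  assert(pickValueBank({true, 64}, false, 32) == BankKind::VRB);
  // Scalar s64 on rv32 still goes to FPR64.
  assert(pickValueBank({false, 64}, false, 32) == BankKind::FPR);
  // Scalar s32 on rv64 stays on GPR.
  assert(pickValueBank({false, 32}, false, 64) == BankKind::GPR);
  std::cout << "bank-selection sketch OK\n";
  return 0;
}

The added !Ty.isVector() guard on the rv32 s64 path matters because a <vscale x 1 x s64> load also reports a known-minimum size of 64 bits and must stay on the vector bank, as the vload_nx1i64 test below checks.
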
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
new file mode 100644
index 0000000000000..ce2bbb6441647
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
@@ -0,0 +1,1569 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+--- |
+
+ define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) #0 {
+ %va = load <vscale x 1 x i8>, ptr %pa, align 1
+ ret <vscale x 1 x i8> %va
+ }
+
+ define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) #0 {
+ %va = load <vscale x 2 x i8>, ptr %pa, align 2
+ ret <vscale x 2 x i8> %va
+ }
+
+ define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) #0 {
+ %va = load <vscale x 4 x i8>, ptr %pa, align 4
+ ret <vscale x 4 x i8> %va
+ }
+
+ define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) #0 {
+ %va = load <vscale x 8 x i8>, ptr %pa, align 8
+ ret <vscale x 8 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 16
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 32 x i8> @vload_nx32i8(ptr %pa) #0 {
+ %va = load <vscale x 32 x i8>, ptr %pa, align 32
+ ret <vscale x 32 x i8> %va
+ }
+
+ define <vscale x 64 x i8> @vload_nx64i8(ptr %pa) #0 {
+ %va = load <vscale x 64 x i8>, ptr %pa, align 64
+ ret <vscale x 64 x i8> %va
+ }
+
+ define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) #0 {
+ %va = load <vscale x 1 x i16>, ptr %pa, align 2
+ ret <vscale x 1 x i16> %va
+ }
+
+ define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) #0 {
+ %va = load <vscale x 2 x i16>, ptr %pa, align 4
+ ret <vscale x 2 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 8
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) #0 {
+ %va = load <vscale x 8 x i16>, ptr %pa, align 16
+ ret <vscale x 8 x i16> %va
+ }
+
+ define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) #0 {
+ %va = load <vscale x 16 x i16>, ptr %pa, align 32
+ ret <vscale x 16 x i16> %va
+ }
+
+ define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) #0 {
+ %va = load <vscale x 32 x i16>, ptr %pa, align 64
+ ret <vscale x 32 x i16> %va
+ }
+
+ define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) #0 {
+ %va = load <vscale x 1 x i32>, ptr %pa, align 4
+ ret <vscale x 1 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 8
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) #0 {
+ %va = load <vscale x 4 x i32>, ptr %pa, align 16
+ ret <vscale x 4 x i32> %va
+ }
+
+ define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) #0 {
+ %va = load <vscale x 8 x i32>, ptr %pa, align 32
+ ret <vscale x 8 x i32> %va
+ }
+
+ define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) #0 {
+ %va = load <vscale x 16 x i32>, ptr %pa, align 64
+ ret <vscale x 16 x i32> %va
+ }
+
+ define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) #0 {
+ %va = load <vscale x 1 x i64>, ptr %pa, align 8
+ ret <vscale x 1 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 16
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) #0 {
+ %va = load <vscale x 4 x i64>, ptr %pa, align 32
+ ret <vscale x 4 x i64> %va
+ }
+
+ define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) #0 {
+ %va = load <vscale x 8 x i64>, ptr %pa, align 64
+ ret <vscale x 8 x i64> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nx16i8_align1(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 1
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nx16i8_align2(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 2
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nx16i8_align16(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 16
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nx16i8_align64(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 64
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nx4i16_align1(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 1
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nx4i16_align2(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 2
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nx4i16_align4(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 4
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nx4i16_align8(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 8
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nx4i16_align16(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 16
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nx2i32_align2(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 2
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nx2i32_align4(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 4
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nx2i32_align8(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 8
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nx2i32_align16(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 16
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nx2i32_align256(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 256
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nx2i64_align4(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 4
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nx2i64_align8(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 8
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nx2i64_align16(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 16
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nx2i64_align32(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 32
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 1 x ptr> @vload_nx1ptr(ptr %pa) #0 {
+ %va = load <vscale x 1 x ptr>, ptr %pa, align 4
+ ret <vscale x 1 x ptr> %va
+ }
+
+ define <vscale x 2 x ptr> @vload_nx2ptr(ptr %pa) #0 {
+ %va = load <vscale x 2 x ptr>, ptr %pa, align 8
+ ret <vscale x 2 x ptr> %va
+ }
+
+ define <vscale x 8 x ptr> @vload_nx8ptr(ptr %pa) #0 {
+ %va = load <vscale x 8 x ptr>, ptr %pa, align 32
+ ret <vscale x 8 x ptr> %va
+ }
+
+ attributes #0 = { "target-features"="+v" }
+
+...
+---
+name: vload_nx1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx1i8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx1i8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx4i8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx8i8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx8i8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx16i8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx16i8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx32i8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vload_nx32i8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nx64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx64i8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vload_nx64i8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nx1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx1i16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx1i16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx4i16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx8i16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx8i16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx16i16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vload_nx16i16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nx32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx32i16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vload_nx32i16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nx1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx1i32
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx1i32
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i32
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i32
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i32
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx4i32
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx8i32
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vload_nx8i32
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nx16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx16i32
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vload_nx16i32
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nx1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx1i64
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx1i64
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i64
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx2i64
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i64
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vload_nx4i64
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nx8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx8i64
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ ; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m8
+ ;
+ ; RV64I-LABEL: name: vload_nx8i64
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ ; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nx16i8_align1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx16i8_align1
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx16i8_align1
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx16i8_align2
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx16i8_align2
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx16i8_align2
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx16i8_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx16i8_align16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx16i8_align16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx16i8_align64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx16i8_align64
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx16i8_align64
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx4i16_align1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i16_align1
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx4i16_align1
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ %1:_(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i16_align2
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i16_align2
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx4i16_align2
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i16_align4
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i16_align4
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx4i16_align4
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i16_align8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i16_align8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx4i16_align8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx4i16_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx4i16_align16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx4i16_align16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i32_align2
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i32_align2
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+ ; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i32_align2
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+ ; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ %1:_(<vscale x 2 x s32>) = G_BITCAST %2(<vscale x 8 x s8>)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i32_align4
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i32_align4
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i32_align4
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i32_align8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i32_align8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i32_align8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i32_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i32_align16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i32_align16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i32_align256
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i32_align256
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2i32_align256
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2i64_align4
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i64_align4
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
+ ; RV32I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx2i64_align4
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
+ ; RV64I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %2:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ %1:_(<vscale x 2 x s64>) = G_BITCAST %2(<vscale x 16 x s8>)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx2i64_align8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i64_align8
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx2i64_align8
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx2i64_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i64_align16
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx2i64_align16
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx2i64_align32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2i64_align32
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ ; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m2
+ ;
+ ; RV64I-LABEL: name: vload_nx2i64_align32
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ ; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nx1ptr
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx1ptr
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx1ptr
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x p0>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx2ptr
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx2ptr
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+ ; RV32I-NEXT: PseudoRET implicit $v8
+ ;
+ ; RV64I-LABEL: name: vload_nx2ptr
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+ ; RV64I-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x p0>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nx8ptr
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; RV32I-LABEL: name: vload_nx8ptr
+ ; RV32I: liveins: $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ ; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+ ; RV32I-NEXT: PseudoRET implicit $v8m4
+ ;
+ ; RV64I-LABEL: name: vload_nx8ptr
+ ; RV64I: liveins: $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ ; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+ ; RV64I-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 8 x p0>)
+ PseudoRET implicit $v8m4
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
new file mode 100644
index 0000000000000..6c8c3f73fb580
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
@@ -0,0 +1,1570 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN: -o - | FileCheck -check-prefix=RV64I %s
+
+--- |
+
+ define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) #0 {
+ store <vscale x 1 x i8> %b, ptr %pa, align 1
+ ret void
+ }
+
+ define void @vstore_nx2i8(ptr %pa, <vscale x 2 x i8> %b) #0 {
+ store <vscale x 2 x i8> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx4i8(ptr %pa, <vscale x 4 x i8> %b) #0 {
+ store <vscale x 4 x i8> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx8i8(ptr %pa, <vscale x 8 x i8> %b) #0 {
+ store <vscale x 8 x i8> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx16i8(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx32i8(ptr %pa, <vscale x 32 x i8> %b) #0 {
+ store <vscale x 32 x i8> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx64i8(ptr %pa, <vscale x 64 x i8> %b) #0 {
+ store <vscale x 64 x i8> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx1i16(ptr %pa, <vscale x 1 x i16> %b) #0 {
+ store <vscale x 1 x i16> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx2i16(ptr %pa, <vscale x 2 x i16> %b) #0 {
+ store <vscale x 2 x i16> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx4i16(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx8i16(ptr %pa, <vscale x 8 x i16> %b) #0 {
+ store <vscale x 8 x i16> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx16i16(ptr %pa, <vscale x 16 x i16> %b) #0 {
+ store <vscale x 16 x i16> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx32i16(ptr %pa, <vscale x 32 x i16> %b) #0 {
+ store <vscale x 32 x i16> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx1i32(ptr %pa, <vscale x 1 x i32> %b) #0 {
+ store <vscale x 1 x i32> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2i32(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx4i32(ptr %pa, <vscale x 4 x i32> %b) #0 {
+ store <vscale x 4 x i32> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx8i32(ptr %pa, <vscale x 8 x i32> %b) #0 {
+ store <vscale x 8 x i32> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx16i32(ptr %pa, <vscale x 16 x i32> %b) #0 {
+ store <vscale x 16 x i32> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx1i64(ptr %pa, <vscale x 1 x i64> %b) #0 {
+ store <vscale x 1 x i64> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx2i64(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx4i64(ptr %pa, <vscale x 4 x i64> %b) #0 {
+ store <vscale x 4 x i64> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx8i64(ptr %pa, <vscale x 8 x i64> %b) #0 {
+ store <vscale x 8 x i64> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx16i8_align1(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 1
+ ret void
+ }
+
+ define void @vstore_nx16i8_align2(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx16i8_align16(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx16i8_align64(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx4i16_align1(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 1
+ ret void
+ }
+
+ define void @vstore_nx4i16_align2(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx4i16_align4(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx4i16_align8(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx4i16_align16(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx2i32_align2(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx2i32_align4(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2i32_align8(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx2i32_align16(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx2i32_align256(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 256
+ ret void
+ }
+
+ define void @vstore_nx2i64_align4(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2i64_align8(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx2i64_align16(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx2i64_align32(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) #0 {
+ store <vscale x 1 x ptr> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) #0 {
+ store <vscale x 2 x ptr> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) #0 {
+ store <vscale x 8 x ptr> %b, ptr %pa, align 32
+ ret void
+ }
+
+ attributes #0 = { "target-features"="+v" }
+
+...
+---
+name: vstore_nx1i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx1i8
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY2]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx1i8
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY2]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i8
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY2]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i8
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY2]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s8>), %0(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx4i8
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY2]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i8
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY2]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s8>), %0(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx8i8
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx8i8
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx16i8
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx16i8
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx32i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; RV32I-LABEL: name: vstore_nx32i8
+ ; RV32I: liveins: $x10, $v8m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY2]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx32i8
+ ; RV64I: liveins: $x10, $v8m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY2]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s8>) = COPY $v8m4
+ G_STORE %1(<vscale x 32 x s8>), %0(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx64i8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; RV32I-LABEL: name: vstore_nx64i8
+ ; RV32I: liveins: $x10, $v8m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY2]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx64i8
+ ; RV64I: liveins: $x10, $v8m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY2]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 64 x s8>) = COPY $v8m8
+ G_STORE %1(<vscale x 64 x s8>), %0(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx1i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx1i16
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY2]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx1i16
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY2]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s16>), %0(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i16
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY2]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i16
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY2]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s16>), %0(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx4i16
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i16
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx8i16
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY2]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx8i16
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY2]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s16>) = COPY $v8m2
+ G_STORE %1(<vscale x 8 x s16>), %0(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; RV32I-LABEL: name: vstore_nx16i16
+ ; RV32I: liveins: $x10, $v8m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY2]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx16i16
+ ; RV64I: liveins: $x10, $v8m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY2]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s16>) = COPY $v8m4
+ G_STORE %1(<vscale x 16 x s16>), %0(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx32i16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; RV32I-LABEL: name: vstore_nx32i16
+ ; RV32I: liveins: $x10, $v8m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY2]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx32i16
+ ; RV64I: liveins: $x10, $v8m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY2]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s16>) = COPY $v8m8
+ G_STORE %1(<vscale x 32 x s16>), %0(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx1i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx1i32
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY2]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx1i32
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY2]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s32>), %0(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i32
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i32
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx4i32
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY2]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i32
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY2]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s32>) = COPY $v8m2
+ G_STORE %1(<vscale x 4 x s32>), %0(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; RV32I-LABEL: name: vstore_nx8i32
+ ; RV32I: liveins: $x10, $v8m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY2]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx8i32
+ ; RV64I: liveins: $x10, $v8m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY2]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s32>) = COPY $v8m4
+ G_STORE %1(<vscale x 8 x s32>), %0(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; RV32I-LABEL: name: vstore_nx16i32
+ ; RV32I: liveins: $x10, $v8m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY2]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx16i32
+ ; RV64I: liveins: $x10, $v8m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY2]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s32>) = COPY $v8m8
+ G_STORE %1(<vscale x 16 x s32>), %0(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx1i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx1i64
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY2]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx1i64
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY2]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s64>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s64>), %0(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx2i64
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i64
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; RV32I-LABEL: name: vstore_nx4i64
+ ; RV32I: liveins: $x10, $v8m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY2]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i64
+ ; RV64I: liveins: $x10, $v8m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY2]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s64>) = COPY $v8m4
+ G_STORE %1(<vscale x 4 x s64>), %0(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; RV32I-LABEL: name: vstore_nx8i64
+ ; RV32I: liveins: $x10, $v8m8
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY2]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx8i64
+ ; RV64I: liveins: $x10, $v8m8
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY2]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s64>) = COPY $v8m8
+ G_STORE %1(<vscale x 8 x s64>), %0(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx16i8_align1
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx16i8_align1
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align2
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx16i8_align2
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx16i8_align2
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx16i8_align16
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx16i8_align16
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align64
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx16i8_align64
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx16i8_align64
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align1
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx4i16_align1
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i16_align1
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ %2:_(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 4 x s16>)
+ G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align2
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx4i16_align2
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i16_align2
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align4
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx4i16_align4
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i16_align4
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx4i16_align8
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i16_align8
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx4i16_align16
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx4i16_align16
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align2
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i32_align2
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i32_align2
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ %2:_(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 2 x s32>)
+ G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align4
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i32_align4
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i32_align4
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i32_align8
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i32_align8
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i32_align16
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i32_align16
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align256
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2i32_align256
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i32_align256
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align4
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx2i64_align4
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i64_align4
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ %2:_(<vscale x 16 x s8>) = G_BITCAST %1(<vscale x 2 x s64>)
+ G_STORE %2(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align8
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx2i64_align8
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i64_align8
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align16
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx2i64_align16
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i64_align16
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align32
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; RV32I-LABEL: name: vstore_nx2i64_align32
+ ; RV32I: liveins: $x10, $v8m2
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2i64_align32
+ ; RV64I: liveins: $x10, $v8m2
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ PseudoRET
+
+...
+---
+name: vstore_nx1ptr
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx1ptr
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY2]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx1ptr
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY2]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x p0>) = COPY $v8
+ G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2ptr
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; RV32I-LABEL: name: vstore_nx2ptr
+ ; RV32I: liveins: $v8, $x10
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY2]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx2ptr
+ ; RV64I: liveins: $v8, $x10
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY2]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x p0>) = COPY $v8
+ G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8ptr
+legalized: true
+tracksRegLiveness: true
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; RV32I-LABEL: name: vstore_nx8ptr
+ ; RV32I: liveins: $x10, $v8m4
+ ; RV32I-NEXT: {{ $}}
+ ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY2]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ ; RV32I-NEXT: PseudoRET
+ ;
+ ; RV64I-LABEL: name: vstore_nx8ptr
+ ; RV64I: liveins: $x10, $v8m4
+ ; RV64I-NEXT: {{ $}}
+ ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+ ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY2]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ ; RV64I-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x p0>) = COPY $v8m4
+ G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ PseudoRET
+
+...
>From 8bc2a1a47b7e198deed8a371e3428d7924a50043 Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Tue, 30 Jul 2024 12:01:44 -0400
Subject: [PATCH 2/6] Address review feedback: drop the dead FP-opcode branch
 and check !Ty.isVector() first for the rv32 s64 case
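
In short: inside the G_LOAD/G_STORE cases the opcode can never be a generic
floating-point opcode, so that branch was unreachable and is dropped, and the
rv32 s64 special case now tests !Ty.isVector() before looking at the size.
As an illustrative sketch only (not code from this patch; the Bank enum and
pickLoadStoreValueBank helper are made-up names, and the later FP-use
refinement the real code performs is ignored), the value-operand decision now
reads roughly like this:

  #include <cstdint>

  enum class Bank { GPR, FPR64, VRB };

  // ValueIsVector: the loaded/stored type is a (scalable) vector.
  // MinSizeInBits: TypeSize::getKnownMinValue() of that type.
  // GPRSize:       32 on rv32, 64 on rv64.
  Bank pickLoadStoreValueBank(bool ValueIsVector, uint64_t MinSizeInBits,
                              unsigned GPRSize) {
    if (ValueIsVector)
      return Bank::VRB;                  // vectors go to the vector bank
    if (GPRSize == 32 && MinSizeInBits == 64)
      return Bank::FPR64;                // s64 on rv32 stays in an FPR (needs D)
    return Bank::GPR;                    // remaining scalars stay on GPR
  }
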
---
.../Target/RISCV/GISel/RISCVRegisterBankInfo.cpp | 14 ++++----------
1 file changed, 4 insertions(+), 10 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index b9e52b7d8682d..6d7aeee83b0c6 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -314,16 +314,13 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
if (Ty.isVector()) {
OpdsMapping[0] = getVRBValueMapping(Size.getKnownMinValue());
OpdsMapping[1] = getVRBValueMapping(Size.getKnownMinValue());
- } else if (isPreISelGenericFloatingPointOpcode(Opc)) {
- OpdsMapping[0] = getFPValueMapping(Size.getFixedValue());
- OpdsMapping[1] = getFPValueMapping(Size.getFixedValue());
} else {
OpdsMapping[0] = GPRValueMapping;
OpdsMapping[1] = GPRValueMapping;
}
// Use FPR64 for s64 loads on rv32.
- if (GPRSize == 32 && Ty.getSizeInBits().getKnownMinValue() == 64 &&
- !Ty.isVector()) {
+ if (!Ty.isVector() && GPRSize == 32 &&
+ Ty.getSizeInBits().getKnownMinValue() == 64) {
assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
break;
@@ -347,17 +344,14 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
if (Ty.isVector()) {
OpdsMapping[0] = getVRBValueMapping(Size.getKnownMinValue());
OpdsMapping[1] = getVRBValueMapping(Size.getKnownMinValue());
- } else if (isPreISelGenericFloatingPointOpcode(Opc)) {
- OpdsMapping[0] = getFPValueMapping(Size.getFixedValue());
- OpdsMapping[1] = getFPValueMapping(Size.getFixedValue());
} else {
OpdsMapping[0] = GPRValueMapping;
OpdsMapping[1] = GPRValueMapping;
}
// Use FPR64 for s64 stores on rv32.
- if (GPRSize == 32 && Ty.getSizeInBits().getKnownMinValue() == 64 &&
- !Ty.isVector()) {
+ if (!Ty.isVector() && GPRSize == 32 &&
+ Ty.getSizeInBits().getKnownMinValue() == 64) {
assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
break;
>From 1957eebb0349404e6107be020a4f4d6d3d963bb7 Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Tue, 30 Jul 2024 13:43:54 -0400
Subject: [PATCH 3/6] Use TypeSize instead of unsigned when getting the value
 mapping
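
RegisterBankInfo::getInstrMappingImpl stored the operand size in a plain
unsigned, which cannot carry the scalable flag; keeping the TypeSize and
passing its known minimum to getValueMapping makes this generic path safe for
scalable vectors. A minimal sketch of the distinction, using the real
llvm::TypeSize API (the wrapper function itself is made up):

  #include <cstdint>
  #include "llvm/Support/TypeSize.h"

  // Returns the compile-time-known minimum width of a possibly scalable size.
  // For a scalable type such as <vscale x 2 x s64> this is 128; calling
  // getFixedValue() there would assert, because the full size also depends on
  // the run-time vscale.
  uint64_t knownMinBits(llvm::TypeSize Size) {
    return Size.getKnownMinValue();
  }

The regbankselect tests below exercise exactly this: an LMUL-scaled type like
<vscale x 16 x s8> reports a known minimum of 128 bits, which is the width the
value mapping is looked up with.
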
---
llvm/lib/CodeGen/RegisterBankInfo.cpp | 5 +-
.../GlobalISel/regbankselect/rvv/load.mir | 221 ++++++++-----
.../GlobalISel/regbankselect/rvv/store.mir | 308 ++++++++++--------
3 files changed, 310 insertions(+), 224 deletions(-)
diff --git a/llvm/lib/CodeGen/RegisterBankInfo.cpp b/llvm/lib/CodeGen/RegisterBankInfo.cpp
index 72b07eb1902d9..00dcc1fbcd0c7 100644
--- a/llvm/lib/CodeGen/RegisterBankInfo.cpp
+++ b/llvm/lib/CodeGen/RegisterBankInfo.cpp
@@ -215,8 +215,9 @@ RegisterBankInfo::getInstrMappingImpl(const MachineInstr &MI) const {
}
}
- unsigned Size = getSizeInBits(Reg, MRI, TRI);
- const ValueMapping *ValMapping = &getValueMapping(0, Size, *CurRegBank);
+ TypeSize Size = getSizeInBits(Reg, MRI, TRI);
+ const ValueMapping *ValMapping =
+ &getValueMapping(0, Size.getKnownMinValue(), *CurRegBank);
if (IsCopyLike) {
if (!OperandsMapping[0]) {
if (MI.isRegSequence()) {
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
index ce2bbb6441647..f542d1b7e6a5d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
@@ -250,8 +250,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 1 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s8>)
PseudoRET implicit $v8
@@ -281,8 +282,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 2 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s8>)
PseudoRET implicit $v8
@@ -312,8 +314,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 4 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s8>)
PseudoRET implicit $v8
@@ -343,8 +346,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 8 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 8 x s8>)
PseudoRET implicit $v8
@@ -374,8 +378,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
@@ -405,8 +410,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m4
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 32 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 32 x s8>)
PseudoRET implicit $v8m4
@@ -436,8 +442,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 64 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 64 x s8>)
PseudoRET implicit $v8m8
@@ -467,8 +474,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 1 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s16>)
PseudoRET implicit $v8
@@ -498,8 +506,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 2 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s16>)
PseudoRET implicit $v8
@@ -529,8 +538,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
@@ -560,8 +570,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m2
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 8 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 8 x s16>)
PseudoRET implicit $v8m2
@@ -591,8 +602,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m4
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 16 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 16 x s16>)
PseudoRET implicit $v8m4
@@ -622,8 +634,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 32 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 32 x s16>)
PseudoRET implicit $v8m8
@@ -653,8 +666,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 1 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s32>)
PseudoRET implicit $v8
@@ -684,8 +698,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
@@ -715,8 +730,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m2
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 4 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 4 x s32>)
PseudoRET implicit $v8m2
@@ -746,8 +762,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m4
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 8 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 8 x s32>)
PseudoRET implicit $v8m4
@@ -777,8 +794,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 16 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 16 x s32>)
PseudoRET implicit $v8m8
@@ -808,8 +826,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 1 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s64>)
PseudoRET implicit $v8
@@ -839,8 +858,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 2 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
@@ -870,8 +890,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m4
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 4 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 4 x s64>)
PseudoRET implicit $v8m4
@@ -901,8 +922,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 8 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 8 x s64>)
PseudoRET implicit $v8m8
@@ -932,8 +954,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
@@ -963,8 +986,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
@@ -994,8 +1018,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
@@ -1025,8 +1050,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
@@ -1058,9 +1084,10 @@ body: |
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
- %1:_(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)
+ %0:gprb(p0) = COPY $x10
+ %3:vrb(p0) = COPY %0(p0)
+ %2:vrb(<vscale x 8 x s8>) = G_LOAD %3(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ %1:vrb(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
@@ -1090,8 +1117,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
@@ -1121,8 +1149,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
@@ -1152,8 +1181,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
@@ -1183,8 +1213,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
@@ -1216,9 +1247,10 @@ body: |
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
- %1:_(<vscale x 2 x s32>) = G_BITCAST %2(<vscale x 8 x s8>)
+ %0:gprb(p0) = COPY $x10
+ %3:vrb(p0) = COPY %0(p0)
+ %2:vrb(<vscale x 8 x s8>) = G_LOAD %3(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ %1:vrb(<vscale x 2 x s32>) = G_BITCAST %2(<vscale x 8 x s8>)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
@@ -1248,8 +1280,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
@@ -1279,8 +1312,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
@@ -1310,8 +1344,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
@@ -1341,8 +1376,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
@@ -1374,9 +1410,10 @@ body: |
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
- %0:_(p0) = COPY $x10
- %2:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
- %1:_(<vscale x 2 x s64>) = G_BITCAST %2(<vscale x 16 x s8>)
+ %0:gprb(p0) = COPY $x10
+ %3:vrb(p0) = COPY %0(p0)
+ %2:vrb(<vscale x 16 x s8>) = G_LOAD %3(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ %1:vrb(<vscale x 2 x s64>) = G_BITCAST %2(<vscale x 16 x s8>)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
@@ -1406,8 +1443,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 2 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
@@ -1437,8 +1475,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 2 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
@@ -1468,8 +1507,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 2 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
@@ -1499,8 +1539,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 1 x p0>) = G_LOAD %2(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x p0>)
PseudoRET implicit $v8
@@ -1530,8 +1571,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 2 x p0>) = G_LOAD %2(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x p0>)
PseudoRET implicit $v8
@@ -1561,8 +1603,9 @@ body: |
; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8m4
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %2:vrb(p0) = COPY %0(p0)
+ %1:vrb(<vscale x 8 x p0>) = G_LOAD %2(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 8 x p0>)
PseudoRET implicit $v8m4
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
index 6c8c3f73fb580..83cf77af25765 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
@@ -5,7 +5,6 @@
# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
# RUN: -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
# RUN: -o - | FileCheck -check-prefix=RV64I %s
-
--- |
define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) #0 {
@@ -251,9 +250,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY2]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 1 x s8>) = COPY $v8
- G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 1 x s8>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 1 x s8>), %2(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
PseudoRET
...
@@ -282,9 +282,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY2]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s8>) = COPY $v8
- G_STORE %1(<vscale x 2 x s8>), %0(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 2 x s8>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 2 x s8>), %2(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
PseudoRET
...
@@ -313,9 +314,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY2]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s8>) = COPY $v8
- G_STORE %1(<vscale x 4 x s8>), %0(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 4 x s8>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 4 x s8>), %2(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
PseudoRET
...
@@ -344,9 +346,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 8 x s8>) = COPY $v8
- G_STORE %1(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 8 x s8>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 8 x s8>), %2(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
PseudoRET
...
@@ -375,9 +378,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s8>) = COPY $v8m2
- G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
PseudoRET
...
@@ -406,9 +410,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY2]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 32 x s8>) = COPY $v8m4
- G_STORE %1(<vscale x 32 x s8>), %0(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 32 x s8>) = COPY $v8m4
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 32 x s8>), %2(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
PseudoRET
...
@@ -437,9 +442,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY2]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 64 x s8>) = COPY $v8m8
- G_STORE %1(<vscale x 64 x s8>), %0(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 64 x s8>) = COPY $v8m8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 64 x s8>), %2(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
PseudoRET
...
@@ -468,9 +474,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY2]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 1 x s16>) = COPY $v8
- G_STORE %1(<vscale x 1 x s16>), %0(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 1 x s16>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 1 x s16>), %2(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
PseudoRET
...
@@ -499,9 +506,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY2]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s16>) = COPY $v8
- G_STORE %1(<vscale x 2 x s16>), %0(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 2 x s16>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 2 x s16>), %2(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
PseudoRET
...
@@ -530,9 +538,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s16>) = COPY $v8
- G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 4 x s16>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
PseudoRET
...
@@ -561,9 +570,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY2]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 8 x s16>) = COPY $v8m2
- G_STORE %1(<vscale x 8 x s16>), %0(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 8 x s16>) = COPY $v8m2
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 8 x s16>), %2(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
PseudoRET
...
@@ -592,9 +602,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY2]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s16>) = COPY $v8m4
- G_STORE %1(<vscale x 16 x s16>), %0(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 16 x s16>) = COPY $v8m4
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 16 x s16>), %2(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
PseudoRET
...
@@ -623,9 +634,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY2]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 32 x s16>) = COPY $v8m8
- G_STORE %1(<vscale x 32 x s16>), %0(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 32 x s16>) = COPY $v8m8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 32 x s16>), %2(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
PseudoRET
...
@@ -654,9 +666,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY2]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 1 x s32>) = COPY $v8
- G_STORE %1(<vscale x 1 x s32>), %0(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 1 x s32>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 1 x s32>), %2(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
PseudoRET
...
@@ -685,9 +698,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s32>) = COPY $v8
- G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 2 x s32>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
PseudoRET
...
@@ -716,9 +730,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY2]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s32>) = COPY $v8m2
- G_STORE %1(<vscale x 4 x s32>), %0(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 4 x s32>) = COPY $v8m2
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 4 x s32>), %2(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
PseudoRET
...
@@ -747,9 +762,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY2]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 8 x s32>) = COPY $v8m4
- G_STORE %1(<vscale x 8 x s32>), %0(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 8 x s32>) = COPY $v8m4
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 8 x s32>), %2(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
PseudoRET
...
@@ -778,9 +794,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY2]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s32>) = COPY $v8m8
- G_STORE %1(<vscale x 16 x s32>), %0(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 16 x s32>) = COPY $v8m8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 16 x s32>), %2(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
PseudoRET
...
@@ -809,9 +826,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY2]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 1 x s64>) = COPY $v8
- G_STORE %1(<vscale x 1 x s64>), %0(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 1 x s64>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 1 x s64>), %2(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
PseudoRET
...
@@ -840,9 +858,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s64>) = COPY $v8m2
- G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 2 x s64>), %2(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
PseudoRET
...
@@ -871,9 +890,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY2]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s64>) = COPY $v8m4
- G_STORE %1(<vscale x 4 x s64>), %0(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 4 x s64>) = COPY $v8m4
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 4 x s64>), %2(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
PseudoRET
...
@@ -902,9 +922,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY2]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 8 x s64>) = COPY $v8m8
- G_STORE %1(<vscale x 8 x s64>), %0(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 8 x s64>) = COPY $v8m8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 8 x s64>), %2(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
PseudoRET
...
@@ -933,9 +954,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s8>) = COPY $v8m2
- G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
PseudoRET
...
@@ -964,9 +986,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s8>) = COPY $v8m2
- G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
PseudoRET
...
@@ -995,9 +1018,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s8>) = COPY $v8m2
- G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
PseudoRET
...
@@ -1026,9 +1050,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 16 x s8>) = COPY $v8m2
- G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
PseudoRET
...
@@ -1059,10 +1084,11 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s16>) = COPY $v8
- %2:_(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 4 x s16>)
- G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 4 x s16>) = COPY $v8
+ %2:vrb(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 4 x s16>)
+ %3:vrb(p0) = COPY %0(p0)
+ G_STORE %2(<vscale x 8 x s8>), %3(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
PseudoRET
...
@@ -1091,9 +1117,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s16>) = COPY $v8
- G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 4 x s16>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
PseudoRET
...
@@ -1122,9 +1149,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s16>) = COPY $v8
- G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 4 x s16>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
PseudoRET
...
@@ -1153,9 +1181,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s16>) = COPY $v8
- G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 4 x s16>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
PseudoRET
...
@@ -1184,9 +1213,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 4 x s16>) = COPY $v8
- G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 4 x s16>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
PseudoRET
...
@@ -1217,10 +1247,11 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s32>) = COPY $v8
- %2:_(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 2 x s32>)
- G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 2 x s32>) = COPY $v8
+ %2:vrb(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 2 x s32>)
+ %3:vrb(p0) = COPY %0(p0)
+ G_STORE %2(<vscale x 8 x s8>), %3(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
PseudoRET
...
@@ -1249,9 +1280,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s32>) = COPY $v8
- G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 2 x s32>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
PseudoRET
...
@@ -1280,9 +1312,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s32>) = COPY $v8
- G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 2 x s32>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
PseudoRET
...
@@ -1311,9 +1344,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s32>) = COPY $v8
- G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 2 x s32>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
PseudoRET
...
@@ -1342,9 +1376,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s32>) = COPY $v8
- G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 2 x s32>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
PseudoRET
...
@@ -1375,10 +1410,11 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s64>) = COPY $v8m2
- %2:_(<vscale x 16 x s8>) = G_BITCAST %1(<vscale x 2 x s64>)
- G_STORE %2(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ %2:vrb(<vscale x 16 x s8>) = G_BITCAST %1(<vscale x 2 x s64>)
+ %3:vrb(p0) = COPY %0(p0)
+ G_STORE %2(<vscale x 16 x s8>), %3(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
PseudoRET
...
@@ -1407,9 +1443,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s64>) = COPY $v8m2
- G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 2 x s64>), %2(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
PseudoRET
...
@@ -1438,9 +1475,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s64>) = COPY $v8m2
- G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 2 x s64>), %2(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
PseudoRET
...
@@ -1469,9 +1507,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x s64>) = COPY $v8m2
- G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 2 x s64>), %2(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
PseudoRET
...
@@ -1500,9 +1539,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY2]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 1 x p0>) = COPY $v8
- G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 1 x p0>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 1 x p0>), %2(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
PseudoRET
...
@@ -1531,9 +1571,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY2]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 2 x p0>) = COPY $v8
- G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 2 x p0>) = COPY $v8
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 2 x p0>), %2(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
PseudoRET
...
@@ -1562,9 +1603,10 @@ body: |
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY2]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
; RV64I-NEXT: PseudoRET
- %0:_(p0) = COPY $x10
- %1:_(<vscale x 8 x p0>) = COPY $v8m4
- G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ %0:gprb(p0) = COPY $x10
+ %1:vrb(<vscale x 8 x p0>) = COPY $v8m4
+ %2:vrb(p0) = COPY %0(p0)
+ G_STORE %1(<vscale x 8 x p0>), %2(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
PseudoRET
...
>From f28442a4619b88d08e7d7e53628c72a420c70fd4 Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Tue, 30 Jul 2024 14:31:14 -0400
Subject: [PATCH 4/6] OpdsMapping[1] is always GPR; use getFixedValue when type
is not vector
---
.../RISCV/GISel/RISCVRegisterBankInfo.cpp | 21 +-
.../GlobalISel/regbankselect/rvv/load.mir | 258 ++++++++++++------
.../GlobalISel/regbankselect/rvv/store.mir | 258 ++++++++++++------
3 files changed, 353 insertions(+), 184 deletions(-)
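
Note for reviewers, not part of the patch to apply: the code hunks below reduce to one rule -- the pointer operand of G_LOAD/G_STORE always lands on GPRB, the data operand goes to VRB for (scalable) vectors and to GPRB otherwise, with FPR64 only for the scalar s64-on-rv32 case. Below is a small standalone C++ sketch of that decision, for illustration only; Bank, Mapping, and mapLoadStore are invented for the example and are not LLVM API.

// Standalone model of the G_LOAD/G_STORE bank choice after this patch.
// None of these types mirror LLVM API; they only model the decision logic.
#include <cassert>
#include <cstdio>

enum class Bank { GPRB, FPRB, VRB };

struct Mapping {
  Bank DataBank; // bank for operand 0 (the loaded/stored value)
  Bank PtrBank;  // bank for operand 1 (the pointer) -- always GPRB now
};

Mapping mapLoadStore(bool IsVector, unsigned MinSizeInBits, unsigned GPRSize,
                     bool HasStdExtD) {
  Mapping M;
  // The pointer operand is always a scalar address, so always GPRB.
  M.PtrBank = Bank::GPRB;
  if (IsVector) {
    // Scalable (and fixed) vectors go to the vector register bank; the
    // known-minimum size picks the LMUL-specific value mapping.
    M.DataBank = Bank::VRB;
    return M;
  }
  // Scalars default to GPRB; s64 on rv32 uses FPR64 (requires the D ext),
  // and only here is the fixed (non-scalable) size meaningful.
  M.DataBank = Bank::GPRB;
  if (GPRSize == 32 && MinSizeInBits == 64) {
    assert(HasStdExtD && "s64 load/store on rv32 expects the D extension");
    M.DataBank = Bank::FPRB;
  }
  return M;
}

int main() {
  // <vscale x 2 x s64> load on rv64: data in VRB, pointer in GPRB.
  Mapping V = mapLoadStore(/*IsVector=*/true, /*MinSizeInBits=*/128,
                           /*GPRSize=*/64, /*HasStdExtD=*/true);
  std::printf("vector data bank is VRB: %d\n", V.DataBank == Bank::VRB);
  // s64 load on rv32 with D: data in FPRB.
  Mapping S = mapLoadStore(false, 64, 32, true);
  std::printf("rv32 s64 data bank is FPRB: %d\n", S.DataBank == Bank::FPRB);
  return 0;
}

The always-GPRB pointer is also why the regenerated checks below gain an extra gprb(p0) COPY: the test inputs route the pointer through a vrb copy, so RegBankSelect now has to copy it back to gprb before the load/store.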
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 6d7aeee83b0c6..45ebac32fed17 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -311,16 +311,15 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case TargetOpcode::G_LOAD: {
LLT Ty = MRI.getType(MI.getOperand(0).getReg());
TypeSize Size = Ty.getSizeInBits();
- if (Ty.isVector()) {
+ if (Ty.isVector())
OpdsMapping[0] = getVRBValueMapping(Size.getKnownMinValue());
- OpdsMapping[1] = getVRBValueMapping(Size.getKnownMinValue());
- } else {
+ else
OpdsMapping[0] = GPRValueMapping;
- OpdsMapping[1] = GPRValueMapping;
- }
+
+ OpdsMapping[1] = GPRValueMapping;
// Use FPR64 for s64 loads on rv32.
if (!Ty.isVector() && GPRSize == 32 &&
- Ty.getSizeInBits().getKnownMinValue() == 64) {
+ Ty.getSizeInBits().getFixedValue() == 64) {
assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
break;
@@ -341,17 +340,15 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
case TargetOpcode::G_STORE: {
LLT Ty = MRI.getType(MI.getOperand(0).getReg());
TypeSize Size = Ty.getSizeInBits();
- if (Ty.isVector()) {
+ if (Ty.isVector())
OpdsMapping[0] = getVRBValueMapping(Size.getKnownMinValue());
- OpdsMapping[1] = getVRBValueMapping(Size.getKnownMinValue());
- } else {
+ else
OpdsMapping[0] = GPRValueMapping;
- OpdsMapping[1] = GPRValueMapping;
- }
+ OpdsMapping[1] = GPRValueMapping;
// Use FPR64 for s64 stores on rv32.
if (!Ty.isVector() && GPRSize == 32 &&
- Ty.getSizeInBits().getKnownMinValue() == 64) {
+ Ty.getSizeInBits().getFixedValue() == 64) {
assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
break;
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
index f542d1b7e6a5d..73ac2702cf9d4 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
@@ -238,7 +238,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -247,7 +248,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -270,7 +272,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -279,7 +282,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -302,7 +306,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -311,7 +316,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -334,7 +340,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -343,7 +350,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -366,7 +374,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -375,7 +384,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
@@ -398,7 +408,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
@@ -407,7 +418,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:gprb(p0) = COPY $x10
@@ -430,7 +442,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m8
;
@@ -439,7 +452,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:gprb(p0) = COPY $x10
@@ -462,7 +476,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -471,7 +486,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -494,7 +510,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -503,7 +520,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -526,7 +544,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -535,7 +554,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -558,7 +578,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -567,7 +588,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
@@ -590,7 +612,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
@@ -599,7 +622,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:gprb(p0) = COPY $x10
@@ -622,7 +646,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8m8
;
@@ -631,7 +656,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:gprb(p0) = COPY $x10
@@ -654,7 +680,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -663,7 +690,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -686,7 +714,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -695,7 +724,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -718,7 +748,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -727,7 +758,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
@@ -750,7 +782,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
@@ -759,7 +792,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:gprb(p0) = COPY $x10
@@ -782,7 +816,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8m8
;
@@ -791,7 +826,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:gprb(p0) = COPY $x10
@@ -814,7 +850,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -823,7 +860,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -846,7 +884,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -855,7 +894,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
@@ -878,7 +918,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
@@ -887,7 +928,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:gprb(p0) = COPY $x10
@@ -910,7 +952,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m8
;
@@ -919,7 +962,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:gprb(p0) = COPY $x10
@@ -942,7 +986,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -951,7 +996,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
@@ -974,7 +1020,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -983,7 +1030,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
@@ -1006,7 +1054,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -1015,7 +1064,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
@@ -1038,7 +1088,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -1047,7 +1098,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
@@ -1070,7 +1122,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
@@ -1080,7 +1133,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
@@ -1105,7 +1159,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1114,7 +1169,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -1137,7 +1193,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1146,7 +1203,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -1169,7 +1227,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1178,7 +1237,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -1201,7 +1261,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1210,7 +1271,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -1233,7 +1295,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
@@ -1243,7 +1306,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
@@ -1268,7 +1332,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1277,7 +1342,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -1300,7 +1366,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1309,7 +1376,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -1332,7 +1400,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1341,7 +1410,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -1364,7 +1434,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1373,7 +1444,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -1396,7 +1468,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
@@ -1406,7 +1479,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
@@ -1431,7 +1505,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -1440,7 +1515,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
@@ -1463,7 +1539,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -1472,7 +1549,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
@@ -1495,7 +1573,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -1504,7 +1583,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
@@ -1527,7 +1607,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1536,7 +1617,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -1559,7 +1641,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1568,7 +1651,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
@@ -1591,7 +1675,8 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
@@ -1600,7 +1685,8 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:gprb(p0) = COPY $x10
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
index 83cf77af25765..d4f6f6661217d 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
@@ -239,7 +239,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY2]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY3]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx1i8
@@ -248,7 +249,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY2]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY3]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 1 x s8>) = COPY $v8
@@ -271,7 +273,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY2]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY3]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i8
@@ -280,7 +283,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY2]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY3]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s8>) = COPY $v8
@@ -303,7 +307,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY2]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY3]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i8
@@ -312,7 +317,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY2]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY3]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s8>) = COPY $v8
@@ -335,7 +341,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY3]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx8i8
@@ -344,7 +351,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY3]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 8 x s8>) = COPY $v8
@@ -367,7 +375,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx16i8
@@ -376,7 +385,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 16 x s8>) = COPY $v8m2
@@ -399,7 +409,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY2]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY3]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx32i8
@@ -408,7 +419,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY2]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY3]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 32 x s8>) = COPY $v8m4
@@ -431,7 +443,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY2]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY3]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx64i8
@@ -440,7 +453,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY2]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY3]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 64 x s8>) = COPY $v8m8
@@ -463,7 +477,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY2]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY3]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx1i16
@@ -472,7 +487,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY2]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY3]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 1 x s16>) = COPY $v8
@@ -495,7 +511,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY2]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY3]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i16
@@ -504,7 +521,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY2]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY3]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s16>) = COPY $v8
@@ -527,7 +545,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i16
@@ -536,7 +555,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s16>) = COPY $v8
@@ -559,7 +579,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY2]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY3]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx8i16
@@ -568,7 +589,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY2]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY3]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 8 x s16>) = COPY $v8m2
@@ -591,7 +613,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY2]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY3]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx16i16
@@ -600,7 +623,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY2]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY3]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 16 x s16>) = COPY $v8m4
@@ -623,7 +647,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY2]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY3]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx32i16
@@ -632,7 +657,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY2]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY3]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 32 x s16>) = COPY $v8m8
@@ -655,7 +681,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY2]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY3]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx1i32
@@ -664,7 +691,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY2]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY3]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 1 x s32>) = COPY $v8
@@ -687,7 +715,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i32
@@ -696,7 +725,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s32>) = COPY $v8
@@ -719,7 +749,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY2]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY3]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i32
@@ -728,7 +759,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY2]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY3]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s32>) = COPY $v8m2
@@ -751,7 +783,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY2]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY3]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx8i32
@@ -760,7 +793,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY2]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY3]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 8 x s32>) = COPY $v8m4
@@ -783,7 +817,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY2]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY3]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx16i32
@@ -792,7 +827,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY2]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY3]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 16 x s32>) = COPY $v8m8
@@ -815,7 +851,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY2]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY3]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx1i64
@@ -824,7 +861,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY2]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY3]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 1 x s64>) = COPY $v8
@@ -847,7 +885,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i64
@@ -856,7 +895,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s64>) = COPY $v8m2
@@ -879,7 +919,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY2]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY3]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i64
@@ -888,7 +929,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY2]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY3]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s64>) = COPY $v8m4
@@ -911,7 +953,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY2]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY3]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx8i64
@@ -920,7 +963,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY2]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY3]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 8 x s64>) = COPY $v8m8
@@ -943,7 +987,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx16i8_align1
@@ -952,7 +997,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 16 x s8>) = COPY $v8m2
@@ -975,7 +1021,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx16i8_align2
@@ -984,7 +1031,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 16 x s8>) = COPY $v8m2
@@ -1007,7 +1055,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx16i8_align16
@@ -1016,7 +1065,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 16 x s8>) = COPY $v8m2
@@ -1039,7 +1089,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx16i8_align64
@@ -1048,7 +1099,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 16 x s8>) = COPY $v8m2
@@ -1072,7 +1124,8 @@ body: |
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY3]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i16_align1
@@ -1082,7 +1135,8 @@ body: |
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY3]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s16>) = COPY $v8
@@ -1106,7 +1160,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i16_align2
@@ -1115,7 +1170,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s16>) = COPY $v8
@@ -1138,7 +1194,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i16_align4
@@ -1147,7 +1204,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s16>) = COPY $v8
@@ -1170,7 +1228,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i16_align8
@@ -1179,7 +1238,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s16>) = COPY $v8
@@ -1202,7 +1262,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i16_align16
@@ -1211,7 +1272,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY2]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s16>) = COPY $v8
@@ -1235,7 +1297,8 @@ body: |
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY3]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i32_align2
@@ -1245,7 +1308,8 @@ body: |
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY2]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY3]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s32>) = COPY $v8
@@ -1269,7 +1333,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i32_align4
@@ -1278,7 +1343,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s32>) = COPY $v8
@@ -1301,7 +1367,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i32_align8
@@ -1310,7 +1377,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s32>) = COPY $v8
@@ -1333,7 +1401,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i32_align16
@@ -1342,7 +1411,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s32>) = COPY $v8
@@ -1365,7 +1435,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i32_align256
@@ -1374,7 +1445,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY2]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s32>) = COPY $v8
@@ -1398,7 +1470,8 @@ body: |
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i64_align4
@@ -1408,7 +1481,8 @@ body: |
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY2]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s64>) = COPY $v8m2
@@ -1432,7 +1506,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i64_align8
@@ -1441,7 +1516,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s64>) = COPY $v8m2
@@ -1464,7 +1540,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i64_align16
@@ -1473,7 +1550,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s64>) = COPY $v8m2
@@ -1496,7 +1574,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i64_align32
@@ -1505,7 +1584,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY2]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s64>) = COPY $v8m2
@@ -1528,7 +1608,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY2]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY3]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx1ptr
@@ -1537,7 +1618,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY2]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY3]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 1 x p0>) = COPY $v8
@@ -1560,7 +1642,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY2]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY3]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2ptr
@@ -1569,7 +1652,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY2]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY3]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x p0>) = COPY $v8
@@ -1592,7 +1676,8 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY2]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY3]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx8ptr
@@ -1601,7 +1686,8 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY2]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY3]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 8 x p0>) = COPY $v8m4
>From 690c3c8a64d4bd9ecde3e725ac441c1e3259d268 Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Tue, 30 Jul 2024 15:13:35 -0400
Subject: [PATCH 5/6] use getFixedValue() from Size directly
---
llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp | 6 ++----
1 file changed, 2 insertions(+), 4 deletions(-)
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
index 45ebac32fed17..f7279bbbd6488 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVRegisterBankInfo.cpp
@@ -318,8 +318,7 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[1] = GPRValueMapping;
// Use FPR64 for s64 loads on rv32.
- if (!Ty.isVector() && GPRSize == 32 &&
- Ty.getSizeInBits().getFixedValue() == 64) {
+ if (!Ty.isVector() && GPRSize == 32 && Size.getFixedValue() == 64) {
assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
break;
@@ -347,8 +346,7 @@ RISCVRegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[1] = GPRValueMapping;
// Use FPR64 for s64 stores on rv32.
- if (!Ty.isVector() && GPRSize == 32 &&
- Ty.getSizeInBits().getFixedValue() == 64) {
+ if (!Ty.isVector() && GPRSize == 32 && Size.getFixedValue() == 64) {
assert(MF.getSubtarget<RISCVSubtarget>().hasStdExtD());
OpdsMapping[0] = getFPValueMapping(Ty.getSizeInBits());
break;
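
For readers less familiar with the TypeSize API touched above: getFixedValue() is only valid for non-scalable sizes, which is why the s64-on-rv32 special case keeps the !Ty.isVector() guard in front of the size query and can then reuse the cached Size instead of re-reading Ty.getSizeInBits(). The following is a minimal standalone sketch of that short-circuit pattern, not the actual RISCVRegisterBankInfo code; TypeSizeSketch and useFPR64ForS64 are hypothetical names invented for illustration.

    #include <cassert>

    // Simplified stand-in for llvm::TypeSize: a bit count that is either fixed
    // or a multiple of vscale (scalable).
    struct TypeSizeSketch {
      unsigned MinValue;
      bool Scalable;
      unsigned getKnownMinValue() const { return MinValue; } // always valid
      unsigned getFixedValue() const {                        // fixed sizes only
        assert(!Scalable && "getFixedValue() called on a scalable size");
        return MinValue;
      }
    };

    // Mirrors the guard in the patch: because && short-circuits, getFixedValue()
    // is never evaluated for (scalable) vector types, so the assert above cannot
    // fire for them.
    bool useFPR64ForS64(bool IsVector, unsigned GPRSize, TypeSizeSketch Size) {
      return !IsVector && GPRSize == 32 && Size.getFixedValue() == 64;
    }
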
>From ce127646206fed04c806b203737e09fe80ae62b4 Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Tue, 30 Jul 2024 23:34:25 -0400
Subject: [PATCH 6/6] correct test code
---
.../GlobalISel/regbankselect/rvv/load.mir | 475 +++++-------------
.../GlobalISel/regbankselect/rvv/store.mir | 475 +++++-------------
2 files changed, 258 insertions(+), 692 deletions(-)
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
index 73ac2702cf9d4..059925c75c13f 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/load.mir
@@ -222,8 +222,6 @@
ret <vscale x 8 x ptr> %va
}
- attributes #0 = { "target-features"="+v" }
-
...
---
name: vload_nx1i8
@@ -237,9 +235,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -247,14 +243,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 1 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ %1:vrb(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s8>)
PseudoRET implicit $v8
@@ -271,9 +264,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -281,14 +272,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 2 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ %1:vrb(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s8>)
PseudoRET implicit $v8
@@ -305,9 +293,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -315,14 +301,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 4 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ %1:vrb(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s8>)
PseudoRET implicit $v8
@@ -339,9 +322,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -349,14 +330,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 8 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ %1:vrb(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
$v8 = COPY %1(<vscale x 8 x s8>)
PseudoRET implicit $v8
@@ -373,9 +351,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -383,14 +359,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ %1:vrb(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
@@ -407,9 +380,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
@@ -417,14 +388,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 32 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ %1:vrb(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 32 x s8>)
PseudoRET implicit $v8m4
@@ -441,9 +409,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m8
;
@@ -451,14 +417,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 64 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ %1:vrb(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 64 x s8>)
PseudoRET implicit $v8m8
@@ -475,9 +438,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -485,14 +446,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 1 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ %1:vrb(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s16>)
PseudoRET implicit $v8
@@ -509,9 +467,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -519,14 +475,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 2 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ %1:vrb(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s16>)
PseudoRET implicit $v8
@@ -543,9 +496,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -553,14 +504,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ %1:vrb(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
@@ -577,9 +525,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -587,14 +533,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 8 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ %1:vrb(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 8 x s16>)
PseudoRET implicit $v8m2
@@ -611,9 +554,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
@@ -621,14 +562,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 16 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ %1:vrb(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 16 x s16>)
PseudoRET implicit $v8m4
@@ -645,9 +583,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8m8
;
@@ -655,14 +591,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 32 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ %1:vrb(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 32 x s16>)
PseudoRET implicit $v8m8
@@ -679,9 +612,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -689,14 +620,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 1 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ %1:vrb(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s32>)
PseudoRET implicit $v8
@@ -713,9 +641,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -723,14 +649,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ %1:vrb(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
@@ -747,9 +670,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -757,14 +678,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 4 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ %1:vrb(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 4 x s32>)
PseudoRET implicit $v8m2
@@ -781,9 +699,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
@@ -791,14 +707,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 8 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ %1:vrb(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 8 x s32>)
PseudoRET implicit $v8m4
@@ -815,9 +728,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8m8
;
@@ -825,14 +736,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 16 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ %1:vrb(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 16 x s32>)
PseudoRET implicit $v8m8
@@ -849,9 +757,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -859,14 +765,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 1 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ %1:vrb(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x s64>)
PseudoRET implicit $v8
@@ -883,9 +786,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -893,14 +794,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 2 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ %1:vrb(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
@@ -917,9 +815,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
@@ -927,14 +823,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 4 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ %1:vrb(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 4 x s64>)
PseudoRET implicit $v8m4
@@ -951,9 +844,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m8
;
@@ -961,14 +852,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 8 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ %1:vrb(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
$v8m8 = COPY %1(<vscale x 8 x s64>)
PseudoRET implicit $v8m8
@@ -985,9 +873,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -995,14 +881,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ %1:vrb(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
@@ -1019,9 +902,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -1029,14 +910,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ %1:vrb(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
@@ -1053,9 +931,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -1063,14 +939,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ %1:vrb(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
@@ -1087,9 +960,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -1097,14 +968,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 16 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ %1:vrb(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
$v8m2 = COPY %1(<vscale x 16 x s8>)
PseudoRET implicit $v8m2
@@ -1121,9 +989,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
@@ -1132,15 +998,12 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %3:vrb(p0) = COPY %0(p0)
- %2:vrb(<vscale x 8 x s8>) = G_LOAD %3(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ %2:vrb(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
%1:vrb(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
@@ -1158,9 +1021,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1168,14 +1029,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ %1:vrb(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
@@ -1192,9 +1050,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1202,14 +1058,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ %1:vrb(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
@@ -1226,9 +1079,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1236,14 +1087,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ %1:vrb(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
@@ -1260,9 +1108,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1270,14 +1116,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 4 x s16>) = G_LOAD %2(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ %1:vrb(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
$v8 = COPY %1(<vscale x 4 x s16>)
PseudoRET implicit $v8
@@ -1294,9 +1137,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV32I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
@@ -1305,15 +1146,12 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
; RV64I-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %3:vrb(p0) = COPY %0(p0)
- %2:vrb(<vscale x 8 x s8>) = G_LOAD %3(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ %2:vrb(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
%1:vrb(<vscale x 2 x s32>) = G_BITCAST %2(<vscale x 8 x s8>)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
@@ -1331,9 +1169,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1341,14 +1177,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ %1:vrb(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
@@ -1365,9 +1198,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1375,14 +1206,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ %1:vrb(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
@@ -1399,9 +1227,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1409,14 +1235,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ %1:vrb(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
@@ -1433,9 +1256,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1443,14 +1264,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 2 x s32>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ %1:vrb(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
$v8 = COPY %1(<vscale x 2 x s32>)
PseudoRET implicit $v8
@@ -1467,9 +1285,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
; RV32I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
@@ -1478,15 +1294,12 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
; RV64I-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
- %3:vrb(p0) = COPY %0(p0)
- %2:vrb(<vscale x 16 x s8>) = G_LOAD %3(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ %2:vrb(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
%1:vrb(<vscale x 2 x s64>) = G_BITCAST %2(<vscale x 16 x s8>)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
@@ -1504,9 +1317,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -1514,14 +1325,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 2 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ %1:vrb(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
@@ -1538,9 +1346,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -1548,14 +1354,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 2 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ %1:vrb(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
@@ -1572,9 +1375,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
; RV32I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV32I-NEXT: PseudoRET implicit $v8m2
;
@@ -1582,14 +1383,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
; RV64I-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
; RV64I-NEXT: PseudoRET implicit $v8m2
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 2 x s64>) = G_LOAD %2(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ %1:vrb(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
$v8m2 = COPY %1(<vscale x 2 x s64>)
PseudoRET implicit $v8m2
@@ -1606,9 +1404,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1616,14 +1412,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 1 x p0>) = G_LOAD %2(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ %1:vrb(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
$v8 = COPY %1(<vscale x 1 x p0>)
PseudoRET implicit $v8
@@ -1640,9 +1433,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
; RV32I-NEXT: PseudoRET implicit $v8
;
@@ -1650,14 +1441,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 2 x p0>) = G_LOAD %2(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ %1:vrb(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
$v8 = COPY %1(<vscale x 2 x p0>)
PseudoRET implicit $v8
@@ -1674,9 +1462,7 @@ body: |
; RV32I: liveins: $x10
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
; RV32I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
; RV32I-NEXT: PseudoRET implicit $v8m4
;
@@ -1684,14 +1470,11 @@ body: |
; RV64I: liveins: $x10
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
- ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:gprb(p0) = COPY [[COPY1]](p0)
- ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY2]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
; RV64I-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
; RV64I-NEXT: PseudoRET implicit $v8m4
%0:gprb(p0) = COPY $x10
- %2:vrb(p0) = COPY %0(p0)
- %1:vrb(<vscale x 8 x p0>) = G_LOAD %2(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ %1:vrb(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
$v8m4 = COPY %1(<vscale x 8 x p0>)
PseudoRET implicit $v8m4
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
index d4f6f6661217d..d6db38cb71d11 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/regbankselect/rvv/store.mir
@@ -222,8 +222,6 @@
ret void
}
- attributes #0 = { "target-features"="+v" }
-
...
---
name: vstore_nx1i8
@@ -238,9 +236,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY3]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx1i8
@@ -248,14 +244,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s8>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY3]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 1 x s8>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 1 x s8>), %2(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
PseudoRET
...
@@ -272,9 +265,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY3]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i8
@@ -282,14 +273,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s8>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY3]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s8>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 2 x s8>), %2(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ G_STORE %1(<vscale x 2 x s8>), %0(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
PseudoRET
...
@@ -306,9 +294,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY3]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i8
@@ -316,14 +302,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s8>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY3]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s8>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 4 x s8>), %2(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ G_STORE %1(<vscale x 4 x s8>), %0(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
PseudoRET
...
@@ -340,9 +323,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY3]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx8i8
@@ -350,14 +331,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s8>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY3]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 8 x s8>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 8 x s8>), %2(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ G_STORE %1(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
PseudoRET
...
@@ -374,9 +352,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx16i8
@@ -384,14 +360,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 16 x s8>) = COPY $v8m2
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
PseudoRET
...
@@ -408,9 +381,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY3]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx32i8
@@ -418,14 +389,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s8>) = COPY $v8m4
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY3]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 32 x s8>) = COPY $v8m4
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 32 x s8>), %2(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ G_STORE %1(<vscale x 32 x s8>), %0(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
PseudoRET
...
@@ -442,9 +410,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY3]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx64i8
@@ -452,14 +418,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 64 x s8>) = COPY $v8m8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY3]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 64 x s8>) = COPY $v8m8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 64 x s8>), %2(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ G_STORE %1(<vscale x 64 x s8>), %0(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
PseudoRET
...
@@ -476,9 +439,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY3]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx1i16
@@ -486,14 +447,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s16>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY3]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 1 x s16>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 1 x s16>), %2(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ G_STORE %1(<vscale x 1 x s16>), %0(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
PseudoRET
...
@@ -510,9 +468,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY3]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i16
@@ -520,14 +476,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s16>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY3]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s16>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 2 x s16>), %2(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ G_STORE %1(<vscale x 2 x s16>), %0(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
PseudoRET
...
@@ -544,9 +497,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i16
@@ -554,14 +505,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s16>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
PseudoRET
...
@@ -578,9 +526,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY3]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx8i16
@@ -588,14 +534,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s16>) = COPY $v8m2
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY3]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 8 x s16>) = COPY $v8m2
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 8 x s16>), %2(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ G_STORE %1(<vscale x 8 x s16>), %0(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
PseudoRET
...
@@ -612,9 +555,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY3]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx16i16
@@ -622,14 +563,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s16>) = COPY $v8m4
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY3]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 16 x s16>) = COPY $v8m4
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 16 x s16>), %2(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ G_STORE %1(<vscale x 16 x s16>), %0(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
PseudoRET
...
@@ -646,9 +584,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY3]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx32i16
@@ -656,14 +592,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 32 x s16>) = COPY $v8m8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY3]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 32 x s16>) = COPY $v8m8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 32 x s16>), %2(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ G_STORE %1(<vscale x 32 x s16>), %0(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
PseudoRET
...
@@ -680,9 +613,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY3]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx1i32
@@ -690,14 +621,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s32>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY3]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 1 x s32>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 1 x s32>), %2(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ G_STORE %1(<vscale x 1 x s32>), %0(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
PseudoRET
...
@@ -714,9 +642,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i32
@@ -724,14 +650,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s32>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
PseudoRET
...
@@ -748,9 +671,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY3]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i32
@@ -758,14 +679,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s32>) = COPY $v8m2
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY3]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s32>) = COPY $v8m2
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 4 x s32>), %2(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ G_STORE %1(<vscale x 4 x s32>), %0(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
PseudoRET
...
@@ -782,9 +700,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY3]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx8i32
@@ -792,14 +708,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s32>) = COPY $v8m4
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY3]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 8 x s32>) = COPY $v8m4
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 8 x s32>), %2(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ G_STORE %1(<vscale x 8 x s32>), %0(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
PseudoRET
...
@@ -816,9 +729,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY3]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx16i32
@@ -826,14 +737,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s32>) = COPY $v8m8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY3]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 16 x s32>) = COPY $v8m8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 16 x s32>), %2(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ G_STORE %1(<vscale x 16 x s32>), %0(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
PseudoRET
...
@@ -850,9 +758,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY3]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx1i64
@@ -860,14 +766,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x s64>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY3]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 1 x s64>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 1 x s64>), %2(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ G_STORE %1(<vscale x 1 x s64>), %0(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
PseudoRET
...
@@ -884,9 +787,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i64
@@ -894,14 +795,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s64>) = COPY $v8m2
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 2 x s64>), %2(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
PseudoRET
...
@@ -918,9 +816,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY3]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i64
@@ -928,14 +824,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s64>) = COPY $v8m4
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY3]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s64>) = COPY $v8m4
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 4 x s64>), %2(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ G_STORE %1(<vscale x 4 x s64>), %0(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
PseudoRET
...
@@ -952,9 +845,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY3]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx8i64
@@ -962,14 +853,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x s64>) = COPY $v8m8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY3]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 8 x s64>) = COPY $v8m8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 8 x s64>), %2(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ G_STORE %1(<vscale x 8 x s64>), %0(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
PseudoRET
...
@@ -986,9 +874,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx16i8_align1
@@ -996,14 +882,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 16 x s8>) = COPY $v8m2
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
PseudoRET
...
@@ -1020,9 +903,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx16i8_align2
@@ -1030,14 +911,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 16 x s8>) = COPY $v8m2
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
PseudoRET
...
@@ -1054,9 +932,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx16i8_align16
@@ -1064,14 +940,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 16 x s8>) = COPY $v8m2
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
PseudoRET
...
@@ -1088,9 +961,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx16i8_align64
@@ -1098,14 +969,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 16 x s8>) = COPY $v8m2
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 16 x s8>) = COPY $v8m2
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 16 x s8>), %2(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
PseudoRET
...
@@ -1123,9 +991,7 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY3]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i16_align1
@@ -1134,15 +1000,12 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY3]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s16>) = COPY $v8
%2:vrb(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 4 x s16>)
- %3:vrb(p0) = COPY %0(p0)
- G_STORE %2(<vscale x 8 x s8>), %3(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
PseudoRET
...
@@ -1159,9 +1022,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i16_align2
@@ -1169,14 +1030,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s16>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
PseudoRET
...
@@ -1193,9 +1051,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i16_align4
@@ -1203,14 +1059,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s16>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
PseudoRET
...
@@ -1227,9 +1080,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i16_align8
@@ -1237,14 +1088,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s16>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
PseudoRET
...
@@ -1261,9 +1109,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx4i16_align16
@@ -1271,14 +1117,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 4 x s16>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY3]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 4 x s16>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 4 x s16>), %2(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
PseudoRET
...
@@ -1296,9 +1139,7 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY3]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i32_align2
@@ -1307,15 +1148,12 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY3]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s32>) = COPY $v8
%2:vrb(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 2 x s32>)
- %3:vrb(p0) = COPY %0(p0)
- G_STORE %2(<vscale x 8 x s8>), %3(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
PseudoRET
...
@@ -1332,9 +1170,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i32_align4
@@ -1342,14 +1178,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s32>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
PseudoRET
...
@@ -1366,9 +1199,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i32_align8
@@ -1376,14 +1207,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s32>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
PseudoRET
...
@@ -1400,9 +1228,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i32_align16
@@ -1410,14 +1236,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s32>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
PseudoRET
...
@@ -1434,9 +1257,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i32_align256
@@ -1444,14 +1265,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s32>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY3]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s32>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 2 x s32>), %2(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
PseudoRET
...
@@ -1469,9 +1287,7 @@ body: |
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
; RV32I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ ; RV32I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i64_align4
@@ -1480,15 +1296,12 @@ body: |
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
; RV64I-NEXT: [[BITCAST:%[0-9]+]]:vrb(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY3]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ ; RV64I-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s64>) = COPY $v8m2
%2:vrb(<vscale x 16 x s8>) = G_BITCAST %1(<vscale x 2 x s64>)
- %3:vrb(p0) = COPY %0(p0)
- G_STORE %2(<vscale x 16 x s8>), %3(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ G_STORE %2(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
PseudoRET
...
@@ -1505,9 +1318,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i64_align8
@@ -1515,14 +1326,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s64>) = COPY $v8m2
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 2 x s64>), %2(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
PseudoRET
...
@@ -1539,9 +1347,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i64_align16
@@ -1549,14 +1355,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s64>) = COPY $v8m2
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 2 x s64>), %2(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
PseudoRET
...
@@ -1573,9 +1376,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2i64_align32
@@ -1583,14 +1384,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x s64>) = COPY $v8m2
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY3]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x s64>) = COPY $v8m2
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 2 x s64>), %2(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
PseudoRET
...
@@ -1607,9 +1405,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY3]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx1ptr
@@ -1617,14 +1413,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 1 x p0>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY3]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 1 x p0>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 1 x p0>), %2(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
PseudoRET
...
@@ -1641,9 +1434,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY3]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx2ptr
@@ -1651,14 +1442,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 2 x p0>) = COPY $v8
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY3]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 2 x p0>) = COPY $v8
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 2 x p0>), %2(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
PseudoRET
...
@@ -1675,9 +1463,7 @@ body: |
; RV32I-NEXT: {{ $}}
; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
- ; RV32I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV32I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY3]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ ; RV32I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
; RV32I-NEXT: PseudoRET
;
; RV64I-LABEL: name: vstore_nx8ptr
@@ -1685,14 +1471,11 @@ body: |
; RV64I-NEXT: {{ $}}
; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(<vscale x 8 x p0>) = COPY $v8m4
- ; RV64I-NEXT: [[COPY2:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
- ; RV64I-NEXT: [[COPY3:%[0-9]+]]:gprb(p0) = COPY [[COPY2]](p0)
- ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY3]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ ; RV64I-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
; RV64I-NEXT: PseudoRET
%0:gprb(p0) = COPY $x10
%1:vrb(<vscale x 8 x p0>) = COPY $v8m4
- %2:vrb(p0) = COPY %0(p0)
- G_STORE %1(<vscale x 8 x p0>), %2(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
PseudoRET
...