[llvm] a0d8fa5 - [RISCV][GlobalISel] Legalize Scalable Vector Loads and Stores (#84965)
via llvm-commits
llvm-commits at lists.llvm.org
Wed Jul 31 16:00:19 PDT 2024
Author: Jiahan Xie
Date: 2024-07-31T19:00:15-04:00
New Revision: a0d8fa5d3a7e05d30004dd4faeb45c1a96fd8769
URL: https://github.com/llvm/llvm-project/commit/a0d8fa5d3a7e05d30004dd4faeb45c1a96fd8769
DIFF: https://github.com/llvm/llvm-project/commit/a0d8fa5d3a7e05d30004dd4faeb45c1a96fd8769.diff
LOG: [RISCV][GlobalISel] Legalize Scalable Vector Loads and Stores (#84965)
This patch adds support for legalizing load and store instructions for
scalable vectors on RISC-V.
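For example, a naturally aligned scalable load such as (taken from the new
legalize-load.mir test below)

  define <vscale x 2 x i32> @vload_nxv2i32(ptr %pa) {
    %va = load <vscale x 2 x i32>, ptr %pa, align 8
    ret <vscale x 2 x i32> %va
  }

now legalizes to a single G_LOAD of <vscale x 2 x s32>. Accesses whose
alignment is smaller than the element size are custom-lowered instead: the
load or store is rewritten over s8 elements and the value is bitcast back,
as the new MIR tests exercise.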
Added:
llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
Modified:
llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
llvm/lib/CodeGen/MIRParser/MIParser.cpp
llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
Removed:
################################################################################
diff --git a/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp b/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
index 2c77ed8b06008..8fe48195c610b 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
@@ -194,7 +194,8 @@ LegalityPredicate LegalityPredicates::memSizeNotByteSizePow2(unsigned MMOIdx) {
return [=](const LegalityQuery &Query) {
const LLT MemTy = Query.MMODescrs[MMOIdx].MemoryTy;
return !MemTy.isByteSized() ||
- !llvm::has_single_bit<uint32_t>(MemTy.getSizeInBytes());
+ !llvm::has_single_bit<uint32_t>(
+ MemTy.getSizeInBytes().getKnownMinValue());
};
}
diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 1d16729aa3387..bf10794a100eb 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -3388,7 +3388,7 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
if (expectAndConsume(MIToken::rparen))
return true;
- Size = MemoryType.getSizeInBytes();
+ Size = MemoryType.getSizeInBytes().getKnownMinValue();
}
MachinePointerInfo Ptr = MachinePointerInfo();
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index f033ea7250030..4e583d96335d9 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -19,6 +19,7 @@
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/ValueTypes.h"
@@ -67,6 +68,17 @@ typeIsLegalBoolVec(unsigned TypeIdx, std::initializer_list<LLT> BoolVecTys,
return all(typeInSet(TypeIdx, BoolVecTys), P);
}
+static LegalityPredicate typeIsLegalPtrVec(unsigned TypeIdx,
+ std::initializer_list<LLT> PtrVecTys,
+ const RISCVSubtarget &ST) {
+ LegalityPredicate P = [=, &ST](const LegalityQuery &Query) {
+ return ST.hasVInstructions() &&
+ (Query.Types[TypeIdx].getElementCount().getKnownMinValue() != 1 ||
+ ST.getELen() == 64);
+ };
+ return all(typeInSet(TypeIdx, PtrVecTys), P);
+}
+
RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
: STI(ST), XLen(STI.getXLen()), sXLen(LLT::scalar(XLen)) {
const LLT sDoubleXLen = LLT::scalar(2 * XLen);
@@ -111,6 +123,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
const LLT nxv4s64 = LLT::scalable_vector(4, s64);
const LLT nxv8s64 = LLT::scalable_vector(8, s64);
+ const LLT nxv1p0 = LLT::scalable_vector(1, p0);
+ const LLT nxv2p0 = LLT::scalable_vector(2, p0);
+ const LLT nxv4p0 = LLT::scalable_vector(4, p0);
+ const LLT nxv8p0 = LLT::scalable_vector(8, p0);
+
using namespace TargetOpcode;
auto BoolVecTys = {nxv1s1, nxv2s1, nxv4s1, nxv8s1, nxv16s1, nxv32s1, nxv64s1};
@@ -120,6 +137,8 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
nxv1s64, nxv2s64, nxv4s64, nxv8s64};
+ auto PtrVecTys = {nxv1p0, nxv2p0, nxv4p0, nxv8p0};
+
getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
.legalFor({s32, sXLen})
.legalIf(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST))
@@ -266,6 +285,23 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
{s32, p0, s16, 16},
{s32, p0, s32, 32},
{p0, p0, sXLen, XLen}});
+ if (ST.hasVInstructions())
+ LoadStoreActions.legalForTypesWithMemDesc({{nxv2s8, p0, nxv2s8, 8},
+ {nxv4s8, p0, nxv4s8, 8},
+ {nxv8s8, p0, nxv8s8, 8},
+ {nxv16s8, p0, nxv16s8, 8},
+ {nxv32s8, p0, nxv32s8, 8},
+ {nxv64s8, p0, nxv64s8, 8},
+ {nxv2s16, p0, nxv2s16, 16},
+ {nxv4s16, p0, nxv4s16, 16},
+ {nxv8s16, p0, nxv8s16, 16},
+ {nxv16s16, p0, nxv16s16, 16},
+ {nxv32s16, p0, nxv32s16, 16},
+ {nxv2s32, p0, nxv2s32, 32},
+ {nxv4s32, p0, nxv4s32, 32},
+ {nxv8s32, p0, nxv8s32, 32},
+ {nxv16s32, p0, nxv16s32, 32}});
+
auto &ExtLoadActions =
getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
.legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}});
@@ -279,7 +315,28 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
} else if (ST.hasStdExtD()) {
LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
}
- LoadStoreActions.clampScalar(0, s32, sXLen).lower();
+ if (ST.hasVInstructions() && ST.getELen() == 64)
+ LoadStoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
+ {nxv1s16, p0, nxv1s16, 16},
+ {nxv1s32, p0, nxv1s32, 32}});
+
+ if (ST.hasVInstructionsI64())
+ LoadStoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
+
+ {nxv2s64, p0, nxv2s64, 64},
+ {nxv4s64, p0, nxv4s64, 64},
+ {nxv8s64, p0, nxv8s64, 64}});
+
+ LoadStoreActions.widenScalarToNextPow2(0, /* MinSize = */ 8)
+ .lowerIfMemSizeNotByteSizePow2()
+      // We take the custom lowering path if we have scalable vector types
+      // with non-standard alignments.
+ .customIf(LegalityPredicate(
+ LegalityPredicates::any(typeIsLegalIntOrFPVec(0, IntOrFPVecTys, ST),
+ typeIsLegalPtrVec(0, PtrVecTys, ST))))
+ .clampScalar(0, s32, sXLen)
+ .lower();
+
ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen).lower();
getActionDefinitionsBuilder({G_PTR_ADD, G_PTRMASK}).legalFor({{p0, sXLen}});
@@ -651,6 +708,46 @@ bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
return true;
}
+bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
+ LegalizerHelper &Helper,
+ MachineIRBuilder &MIB) const {
+ assert((isa<GLoad>(MI) || isa<GStore>(MI)) &&
+ "Machine instructions must be Load/Store.");
+ MachineRegisterInfo &MRI = *MIB.getMRI();
+ MachineFunction *MF = MI.getMF();
+ const DataLayout &DL = MIB.getDataLayout();
+ LLVMContext &Ctx = MF->getFunction().getContext();
+
+ Register DstReg = MI.getOperand(0).getReg();
+ LLT DataTy = MRI.getType(DstReg);
+ if (!DataTy.isVector())
+ return false;
+
+ if (!MI.hasOneMemOperand())
+ return false;
+
+ MachineMemOperand *MMO = *MI.memoperands_begin();
+
+ const auto *TLI = STI.getTargetLowering();
+ EVT VT = EVT::getEVT(getTypeForLLT(DataTy, Ctx));
+
+ if (TLI->allowsMemoryAccessForAlignment(Ctx, DL, VT, *MMO))
+ return true;
+
+ unsigned EltSizeBits = DataTy.getScalarSizeInBits();
+ assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
+ "Unexpected unaligned RVV load type");
+
+ // Calculate the new vector type with i8 elements
+ unsigned NumElements =
+ DataTy.getElementCount().getKnownMinValue() * (EltSizeBits / 8);
+ LLT NewDataTy = LLT::scalable_vector(NumElements, 8);
+
+ Helper.bitcast(MI, 0, NewDataTy);
+
+ return true;
+}
+
/// Return the mask type suitable for masking the provided
/// vector type. This is simply an i1 element type vector of the same
/// (possibly scalable) length.
@@ -828,6 +925,9 @@ bool RISCVLegalizerInfo::legalizeCustom(
return legalizeExt(MI, MIRBuilder);
case TargetOpcode::G_SPLAT_VECTOR:
return legalizeSplatVector(MI, MIRBuilder);
+ case TargetOpcode::G_LOAD:
+ case TargetOpcode::G_STORE:
+ return legalizeLoadStore(MI, Helper, MIRBuilder);
}
llvm_unreachable("expected switch to return");
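The custom lowering in legalizeLoadStore above is easiest to see in MIR. For
a <vscale x 2 x s32> load with only 2-byte alignment, the 32-bit element size
exceeds the alignment, so the access is rebuilt over 2 * (32 / 8) = 8 s8
elements and bitcast back to the original type (from the vload_nxv2i32_align2
test added below):

  %0:_(p0) = COPY $x10
  %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
  %2:_(<vscale x 2 x s32>) = G_BITCAST %1(<vscale x 8 x s8>)
  $v8 = COPY %2(<vscale x 2 x s32>)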
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index 5bb1e7a728278..2fc28615e7630 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -13,6 +13,7 @@
#ifndef LLVM_LIB_TARGET_RISCV_RISCVMACHINELEGALIZER_H
#define LLVM_LIB_TARGET_RISCV_RISCVMACHINELEGALIZER_H
+#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/LegalizerInfo.h"
#include "llvm/CodeGen/Register.h"
@@ -45,6 +46,8 @@ class RISCVLegalizerInfo : public LegalizerInfo {
bool legalizeVScale(MachineInstr &MI, MachineIRBuilder &MIB) const;
bool legalizeExt(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
bool legalizeSplatVector(MachineInstr &MI, MachineIRBuilder &MIB) const;
+ bool legalizeLoadStore(MachineInstr &MI, LegalizerHelper &Helper,
+ MachineIRBuilder &MIB) const;
};
} // end namespace llvm
#endif
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
new file mode 100644
index 0000000000000..12f218863e400
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
@@ -0,0 +1,1043 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+ define <vscale x 1 x i8> @vload_nxv1i8(ptr %pa) #0 {
+ %va = load <vscale x 1 x i8>, ptr %pa, align 1
+ ret <vscale x 1 x i8> %va
+ }
+
+ define <vscale x 2 x i8> @vload_nxv2i8(ptr %pa) #0 {
+ %va = load <vscale x 2 x i8>, ptr %pa, align 2
+ ret <vscale x 2 x i8> %va
+ }
+
+ define <vscale x 4 x i8> @vload_nxv4i8(ptr %pa) #0 {
+ %va = load <vscale x 4 x i8>, ptr %pa, align 4
+ ret <vscale x 4 x i8> %va
+ }
+
+ define <vscale x 8 x i8> @vload_nxv8i8(ptr %pa) #0 {
+ %va = load <vscale x 8 x i8>, ptr %pa, align 8
+ ret <vscale x 8 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nxv16i8(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 16
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 32 x i8> @vload_nxv32i8(ptr %pa) #0 {
+ %va = load <vscale x 32 x i8>, ptr %pa, align 32
+ ret <vscale x 32 x i8> %va
+ }
+
+ define <vscale x 64 x i8> @vload_nxv64i8(ptr %pa) #0 {
+ %va = load <vscale x 64 x i8>, ptr %pa, align 64
+ ret <vscale x 64 x i8> %va
+ }
+
+ define <vscale x 1 x i16> @vload_nxv1i16(ptr %pa) #0 {
+ %va = load <vscale x 1 x i16>, ptr %pa, align 2
+ ret <vscale x 1 x i16> %va
+ }
+
+ define <vscale x 2 x i16> @vload_nxv2i16(ptr %pa) #0 {
+ %va = load <vscale x 2 x i16>, ptr %pa, align 4
+ ret <vscale x 2 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nxv4i16(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 8
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 8 x i16> @vload_nxv8i16(ptr %pa) #0 {
+ %va = load <vscale x 8 x i16>, ptr %pa, align 16
+ ret <vscale x 8 x i16> %va
+ }
+
+ define <vscale x 16 x i16> @vload_nxv16i16(ptr %pa) #0 {
+ %va = load <vscale x 16 x i16>, ptr %pa, align 32
+ ret <vscale x 16 x i16> %va
+ }
+
+ define <vscale x 32 x i16> @vload_nxv32i16(ptr %pa) #0 {
+ %va = load <vscale x 32 x i16>, ptr %pa, align 64
+ ret <vscale x 32 x i16> %va
+ }
+
+ define <vscale x 1 x i32> @vload_nxv1i32(ptr %pa) #0 {
+ %va = load <vscale x 1 x i32>, ptr %pa, align 4
+ ret <vscale x 1 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nxv2i32(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 8
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 4 x i32> @vload_nxv4i32(ptr %pa) #0 {
+ %va = load <vscale x 4 x i32>, ptr %pa, align 16
+ ret <vscale x 4 x i32> %va
+ }
+
+ define <vscale x 8 x i32> @vload_nxv8i32(ptr %pa) #0 {
+ %va = load <vscale x 8 x i32>, ptr %pa, align 32
+ ret <vscale x 8 x i32> %va
+ }
+
+ define <vscale x 16 x i32> @vload_nxv16i32(ptr %pa) #0 {
+ %va = load <vscale x 16 x i32>, ptr %pa, align 64
+ ret <vscale x 16 x i32> %va
+ }
+
+ define <vscale x 1 x i64> @vload_nxv1i64(ptr %pa) #0 {
+ %va = load <vscale x 1 x i64>, ptr %pa, align 8
+ ret <vscale x 1 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nxv2i64(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 16
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 4 x i64> @vload_nxv4i64(ptr %pa) #0 {
+ %va = load <vscale x 4 x i64>, ptr %pa, align 32
+ ret <vscale x 4 x i64> %va
+ }
+
+ define <vscale x 8 x i64> @vload_nxv8i64(ptr %pa) #0 {
+ %va = load <vscale x 8 x i64>, ptr %pa, align 64
+ ret <vscale x 8 x i64> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nxv16i8_align1(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 1
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nxv16i8_align2(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 2
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nxv16i8_align16(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 16
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 16 x i8> @vload_nxv16i8_align64(ptr %pa) #0 {
+ %va = load <vscale x 16 x i8>, ptr %pa, align 64
+ ret <vscale x 16 x i8> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nxv4i16_align1(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 1
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nxv4i16_align2(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 2
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nxv4i16_align4(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 4
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nxv4i16_align8(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 8
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 4 x i16> @vload_nxv4i16_align16(ptr %pa) #0 {
+ %va = load <vscale x 4 x i16>, ptr %pa, align 16
+ ret <vscale x 4 x i16> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nxv2i32_align2(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 2
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nxv2i32_align4(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 4
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nxv2i32_align8(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 8
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nxv2i32_align16(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 16
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i32> @vload_nxv2i32_align256(ptr %pa) #0 {
+ %va = load <vscale x 2 x i32>, ptr %pa, align 256
+ ret <vscale x 2 x i32> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nxv2i64_align4(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 4
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nxv2i64_align8(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 8
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nxv2i64_align16(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 16
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 2 x i64> @vload_nxv2i64_align32(ptr %pa) #0 {
+ %va = load <vscale x 2 x i64>, ptr %pa, align 32
+ ret <vscale x 2 x i64> %va
+ }
+
+ define <vscale x 1 x ptr> @vload_nxv1ptr(ptr %pa) #0 {
+ %va = load <vscale x 1 x ptr>, ptr %pa, align 4
+ ret <vscale x 1 x ptr> %va
+ }
+
+ define <vscale x 2 x ptr> @vload_nxv2ptr(ptr %pa) #0 {
+ %va = load <vscale x 2 x ptr>, ptr %pa, align 8
+ ret <vscale x 2 x ptr> %va
+ }
+
+ define <vscale x 8 x ptr> @vload_nxv8ptr(ptr %pa) #0 {
+ %va = load <vscale x 8 x ptr>, ptr %pa, align 32
+ ret <vscale x 8 x ptr> %va
+ }
+
+ attributes #0 = { "target-features"="+v" }
+
+...
+---
+name: vload_nxv1i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv1i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv4i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 4 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv8i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv8i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 8 x s8>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv16i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv16i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv32i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv32i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 32 x s8>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nxv64i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv64i8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 64 x s8>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nxv1i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv1i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv4i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv8i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv8i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 8 x s16>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv16i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv16i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 16 x s16>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nxv32i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv32i16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 32 x s16>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nxv1i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv1i32
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i32
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv4i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i32
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 4 x s32>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv8i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv8i32
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 8 x s32>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nxv16i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv16i32
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 16 x s32>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nxv1i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv1i64
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x s64>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i64
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv4i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i64
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 4 x s64>)
+ PseudoRET implicit $v8m4
+
+...
+---
+name: vload_nxv8i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv8i64
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+ $v8m8 = COPY %1(<vscale x 8 x s64>)
+ PseudoRET implicit $v8m8
+
+...
+---
+name: vload_nxv16i8_align1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv16i8_align1
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv16i8_align2
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv16i8_align2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv16i8_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv16i8_align16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv16i8_align64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv16i8_align64
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+ $v8m2 = COPY %1(<vscale x 16 x s8>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv4i16_align1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i16_align1
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 1)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv4i16_align2
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i16_align2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv4i16_align4
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i16_align4
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv4i16_align8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i16_align8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv4i16_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv4i16_align16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+ $v8 = COPY %1(<vscale x 4 x s16>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i32_align2
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i32_align2
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+ ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 2)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i32_align4
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i32_align4
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i32_align8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i32_align8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i32_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i32_align16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i32_align256
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i32_align256
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+ $v8 = COPY %1(<vscale x 2 x s32>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2i64_align4
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i64_align4
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
+ ; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 4)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv2i64_align8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i64_align8
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv2i64_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i64_align16
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv2i64_align32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2i64_align32
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m2
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+ $v8m2 = COPY %1(<vscale x 2 x s64>)
+ PseudoRET implicit $v8m2
+
+...
+---
+name: vload_nxv1ptr
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv1ptr
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 1 x p0>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv2ptr
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv2ptr
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+ ; CHECK-NEXT: PseudoRET implicit $v8
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+ $v8 = COPY %1(<vscale x 2 x p0>)
+ PseudoRET implicit $v8
+
+...
+---
+name: vload_nxv8ptr
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10
+
+ ; CHECK-LABEL: name: vload_nxv8ptr
+ ; CHECK: liveins: $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+ ; CHECK-NEXT: PseudoRET implicit $v8m4
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+ $v8m4 = COPY %1(<vscale x 8 x p0>)
+ PseudoRET implicit $v8m4
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
new file mode 100644
index 0000000000000..b91d25509646f
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
@@ -0,0 +1,1043 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+ define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) #0 {
+ store <vscale x 1 x i8> %b, ptr %pa, align 1
+ ret void
+ }
+
+ define void @vstore_nx2i8(ptr %pa, <vscale x 2 x i8> %b) #0 {
+ store <vscale x 2 x i8> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx4i8(ptr %pa, <vscale x 4 x i8> %b) #0 {
+ store <vscale x 4 x i8> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx8i8(ptr %pa, <vscale x 8 x i8> %b) #0 {
+ store <vscale x 8 x i8> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx16i8(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx32i8(ptr %pa, <vscale x 32 x i8> %b) #0 {
+ store <vscale x 32 x i8> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx64i8(ptr %pa, <vscale x 64 x i8> %b) #0 {
+ store <vscale x 64 x i8> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx1i16(ptr %pa, <vscale x 1 x i16> %b) #0 {
+ store <vscale x 1 x i16> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx2i16(ptr %pa, <vscale x 2 x i16> %b) #0 {
+ store <vscale x 2 x i16> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx4i16(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx8i16(ptr %pa, <vscale x 8 x i16> %b) #0 {
+ store <vscale x 8 x i16> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx16i16(ptr %pa, <vscale x 16 x i16> %b) #0 {
+ store <vscale x 16 x i16> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx32i16(ptr %pa, <vscale x 32 x i16> %b) #0 {
+ store <vscale x 32 x i16> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx1i32(ptr %pa, <vscale x 1 x i32> %b) #0 {
+ store <vscale x 1 x i32> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2i32(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx4i32(ptr %pa, <vscale x 4 x i32> %b) #0 {
+ store <vscale x 4 x i32> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx8i32(ptr %pa, <vscale x 8 x i32> %b) #0 {
+ store <vscale x 8 x i32> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx16i32(ptr %pa, <vscale x 16 x i32> %b) #0 {
+ store <vscale x 16 x i32> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx1i64(ptr %pa, <vscale x 1 x i64> %b) #0 {
+ store <vscale x 1 x i64> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx2i64(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx4i64(ptr %pa, <vscale x 4 x i64> %b) #0 {
+ store <vscale x 4 x i64> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx8i64(ptr %pa, <vscale x 8 x i64> %b) #0 {
+ store <vscale x 8 x i64> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx16i8_align1(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 1
+ ret void
+ }
+
+ define void @vstore_nx16i8_align2(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx16i8_align16(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx16i8_align64(ptr %pa, <vscale x 16 x i8> %b) #0 {
+ store <vscale x 16 x i8> %b, ptr %pa, align 64
+ ret void
+ }
+
+ define void @vstore_nx4i16_align1(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 1
+ ret void
+ }
+
+ define void @vstore_nx4i16_align2(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx4i16_align4(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx4i16_align8(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx4i16_align16(ptr %pa, <vscale x 4 x i16> %b) #0 {
+ store <vscale x 4 x i16> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx2i32_align2(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 2
+ ret void
+ }
+
+ define void @vstore_nx2i32_align4(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2i32_align8(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx2i32_align16(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx2i32_align256(ptr %pa, <vscale x 2 x i32> %b) #0 {
+ store <vscale x 2 x i32> %b, ptr %pa, align 256
+ ret void
+ }
+
+ define void @vstore_nx2i64_align4(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2i64_align8(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx2i64_align16(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 16
+ ret void
+ }
+
+ define void @vstore_nx2i64_align32(ptr %pa, <vscale x 2 x i64> %b) #0 {
+ store <vscale x 2 x i64> %b, ptr %pa, align 32
+ ret void
+ }
+
+ define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) #0 {
+ store <vscale x 1 x ptr> %b, ptr %pa, align 4
+ ret void
+ }
+
+ define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) #0 {
+ store <vscale x 2 x ptr> %b, ptr %pa, align 8
+ ret void
+ }
+
+ define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) #0 {
+ store <vscale x 8 x ptr> %b, ptr %pa, align 32
+ ret void
+ }
+
+ attributes #0 = { "target-features"="+v" }
+
+...
+---
+name: vstore_nx1i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx1i8
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i8
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s8>), %0(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx4i8
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s8>), %0(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx8i8
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s8>) = COPY $v8
+ G_STORE %1(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx16i8
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx32i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; CHECK-LABEL: name: vstore_nx32i8
+ ; CHECK: liveins: $x10, $v8m4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s8>) = COPY $v8m4
+ G_STORE %1(<vscale x 32 x s8>), %0(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx64i8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; CHECK-LABEL: name: vstore_nx64i8
+ ; CHECK: liveins: $x10, $v8m8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 64 x s8>) = COPY $v8m8
+ G_STORE %1(<vscale x 64 x s8>), %0(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx1i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx1i16
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s16>), %0(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i16
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s16>), %0(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx4i16
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx8i16
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s16>) = COPY $v8m2
+ G_STORE %1(<vscale x 8 x s16>), %0(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; CHECK-LABEL: name: vstore_nx16i16
+ ; CHECK: liveins: $x10, $v8m4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s16>) = COPY $v8m4
+ G_STORE %1(<vscale x 16 x s16>), %0(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx32i16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; CHECK-LABEL: name: vstore_nx32i16
+ ; CHECK: liveins: $x10, $v8m8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 32 x s16>) = COPY $v8m8
+ G_STORE %1(<vscale x 32 x s16>), %0(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx1i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx1i32
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s32>), %0(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i32
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx4i32
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s32>) = COPY $v8m2
+ G_STORE %1(<vscale x 4 x s32>), %0(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; CHECK-LABEL: name: vstore_nx8i32
+ ; CHECK: liveins: $x10, $v8m4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s32>) = COPY $v8m4
+ G_STORE %1(<vscale x 8 x s32>), %0(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; CHECK-LABEL: name: vstore_nx16i32
+ ; CHECK: liveins: $x10, $v8m8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s32>) = COPY $v8m8
+ G_STORE %1(<vscale x 16 x s32>), %0(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx1i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx1i64
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x s64>) = COPY $v8
+ G_STORE %1(<vscale x 1 x s64>), %0(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx2i64
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m4
+
+ ; CHECK-LABEL: name: vstore_nx4i64
+ ; CHECK: liveins: $x10, $v8m4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s64>) = COPY $v8m4
+ G_STORE %1(<vscale x 4 x s64>), %0(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8i64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; CHECK-LABEL: name: vstore_nx8i64
+ ; CHECK: liveins: $x10, $v8m8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x s64>) = COPY $v8m8
+ G_STORE %1(<vscale x 8 x s64>), %0(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx16i8_align1
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align2
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx16i8_align2
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx16i8_align16
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx16i8_align64
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx16i8_align64
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 16 x s8>) = COPY $v8m2
+ G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align1
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx4i16_align1
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
+ ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 1)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align2
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx4i16_align2
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align4
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx4i16_align4
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx4i16_align8
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx4i16_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx4i16_align16
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 4 x s16>) = COPY $v8
+ G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align2
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i32_align2
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
+ ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 2)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align4
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i32_align4
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i32_align8
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i32_align16
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i32_align256
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx2i32_align256
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s32>) = COPY $v8
+ G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align4
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx2i64_align4
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+ ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 4)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align8
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx2i64_align8
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align16
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx2i64_align16
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2i64_align32
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx2i64_align32
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x s64>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+ PseudoRET
+
+...
+---
+name: vstore_nx1ptr
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $v8, $x10
+
+ ; CHECK-LABEL: name: vstore_nx1ptr
+ ; CHECK: liveins: $v8, $x10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x p0>) = COPY $v8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 1 x p0>) = COPY $v8
+ G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx2ptr
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m2
+
+ ; CHECK-LABEL: name: vstore_nx2ptr
+ ; CHECK: liveins: $x10, $v8m2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $v8m2
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 2 x p0>) = COPY $v8m2
+ G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+ PseudoRET
+
+...
+---
+name: vstore_nx8ptr
+body: |
+ bb.1 (%ir-block.0):
+ liveins: $x10, $v8m8
+
+ ; CHECK-LABEL: name: vstore_nx8ptr
+ ; CHECK: liveins: $x10, $v8m8
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x p0>) = COPY $v8m8
+ ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ ; CHECK-NEXT: PseudoRET
+ %0:_(p0) = COPY $x10
+ %1:_(<vscale x 8 x p0>) = COPY $v8m8
+ G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+ PseudoRET
+
+...
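A note on the under-aligned cases above (vstore_nx4i16_align1,
vstore_nx2i32_align2, vstore_nx2i64_align4): when the memory operand's
alignment is smaller than the element width, the legalizer does not split
the store; it bitcasts the value to an equivalent byte vector
(<vscale x 8 x s8> or <vscale x 16 x s8>) and stores that instead, carrying
the original align over to the MMO. The snippet below is a minimal sketch of
how such a rule can be phrased in the GlobalISel rule DSL; it is
illustrative only, not the exact rule this patch adds, and it assumes the
rule is expressed via bitcastIf, that type index 0 and MMO index 0 are the
relevant ones for G_STORE, and a local s8 scalar LLT.

// Sketch only: bitcast an under-aligned scalable-vector G_STORE to a
// byte-vector store of the same known-minimum size, matching the
// <vscale x N x s8> rewrites in the tests above.
const LLT s8 = LLT::scalar(8);
getActionDefinitionsBuilder(G_STORE)
    .bitcastIf(
        [=](const LegalityQuery &Query) {
          // Trigger when the MMO's alignment is below the element width.
          const LLT Ty = Query.Types[0];
          return Ty.isScalableVector() &&
                 Query.MMODescrs[0].AlignInBits < Ty.getScalarSizeInBits();
        },
        [=](const LegalityQuery &Query) {
          // Same total (known-minimum) size, reinterpreted as bytes.
          const LLT Ty = Query.Types[0];
          return std::pair(
              0u, LLT::scalable_vector(
                      Ty.getSizeInBits().getKnownMinValue() / 8, s8));
        });

The CHECK lines in these files follow the usual update_mir_test_checks.py
format, and the tests are presumably driven by llc with -run-pass=legalizer
and the V extension enabled (e.g. -mattr=+v).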