[llvm] [RISCV][GlobalISel] Legalize Scalable Vector Loads and Stores (PR #84965)

Jiahan Xie via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 25 10:42:51 PDT 2024


https://github.com/jiahanxie353 updated https://github.com/llvm/llvm-project/pull/84965

>From 9debaf86a9ac8fb4236e16eda5ea46a74a5dce7e Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Sun, 24 Mar 2024 20:11:14 -0400
Subject: [PATCH 1/6] use TypeSize for mem type since it can be a scalable vector

---
 llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 430fcae731689..4d9ce43876cc0 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -3397,8 +3397,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerLoad(GAnyLoad &LoadMI) {
   LLT MemTy = MMO.getMemoryType();
   MachineFunction &MF = MIRBuilder.getMF();
 
-  unsigned MemSizeInBits = MemTy.getSizeInBits();
-  unsigned MemStoreSizeInBits = 8 * MemTy.getSizeInBytes();
+  TypeSize MemSizeInBits = MemTy.getSizeInBits();
+  TypeSize MemStoreSizeInBits = MemTy.getSizeInBytes().multiplyCoefficientBy(8);
 
   if (MemSizeInBits != MemStoreSizeInBits) {
     if (MemTy.isVector())
@@ -3462,7 +3462,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerLoad(GAnyLoad &LoadMI) {
 
   if (!isPowerOf2_32(MemSizeInBits)) {
     // This load needs splitting into power of 2 sized loads.
-    LargeSplitSize = llvm::bit_floor(MemSizeInBits);
+    LargeSplitSize = llvm::bit_floor(MemSizeInBits.getKnownMinValue());
     SmallSplitSize = MemSizeInBits - LargeSplitSize;
   } else {
     // This is already a power of 2, but we still need to split this in half.

>From 5b628184d96198da37e808a4c492e18cbef41cf1 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Mon, 25 Mar 2024 12:16:36 -0400
Subject: [PATCH 2/6] legalize vload and vstore nxv1s8

---
 llvm/lib/CodeGen/MIRParser/MIParser.cpp       |  2 +-
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |  6 +++-
 .../legalizer/rvv/legalize-load.mir           | 29 ++++++++++++++++++
 .../legalizer/rvv/legalize-store.mir          | 30 +++++++++++++++++++
 4 files changed, 65 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir

diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 95924f0566283..1cbc4e408b86f 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -3421,7 +3421,7 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
     if (expectAndConsume(MIToken::rparen))
       return true;
 
-    Size = MemoryType.getSizeInBytes();
+    Size = MemoryType.getSizeInBytes().getKnownMinValue();
   }
 
   MachinePointerInfo Ptr = MachinePointerInfo();
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index bec542f7781b1..8c5af08a24349 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -264,7 +264,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
           .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                      {s32, p0, s16, 16},
                                      {s32, p0, s32, 32},
-                                     {p0, p0, sXLen, XLen}});
+                                     {p0, p0, sXLen, XLen},
+                                     {nxv1s8, p0, nxv1s8, 8}})
+          .widenScalarToNextPow2(0, /* MinSize = */ 8)
+          .lowerIfMemSizeNotByteSizePow2();
+
   auto &ExtLoadActions =
       getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
           .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}});
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
new file mode 100644
index 0000000000000..1b62f55520716
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
@@ -0,0 +1,29 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+  define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) {
+    %va = load <vscale x 1 x i8>, ptr %pa
+    ret <vscale x 1 x i8> %va
+  }
+
+...
+---
+name:            vload_nx1i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx1i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
new file mode 100644
index 0000000000000..b947861f7be50
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
@@ -0,0 +1,30 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+  define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) {
+    store <vscale x 1 x i8> %b, ptr %pa, align 1
+    ret void
+  }
+
+...
+---
+name:            vstore_nx1i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx1i8
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s8>) = COPY $v8
+    G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+    PseudoRET
+
+...

>From 4f3b9159634044ac3b994f2892e655ac64678474 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Wed, 27 Mar 2024 14:32:04 -0400
Subject: [PATCH 3/6] add more tests for i8 elmty

---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |  6 +-
 .../legalizer/rvv/legalize-store.mir          | 70 ++++++++++++++++++-
 2 files changed, 74 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 8c5af08a24349..1e3d6b12751af 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -265,7 +265,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
                                      {s32, p0, s16, 16},
                                      {s32, p0, s32, 32},
                                      {p0, p0, sXLen, XLen},
-                                     {nxv1s8, p0, nxv1s8, 8}})
+                                     {nxv1s8, p0, nxv1s8, 8},
+                                     {nxv2s8, p0, nxv2s8, 8},
+                                     {nxv4s8, p0, nxv4s8, 8},
+                                     {nxv8s8, p0, nxv8s8, 8},
+                                     {nxv16s8, p0, nxv16s8, 8}})
           .widenScalarToNextPow2(0, /* MinSize = */ 8)
           .lowerIfMemSizeNotByteSizePow2();
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
index b947861f7be50..66df14b6635d3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
@@ -7,7 +7,27 @@
     store <vscale x 1 x i8> %b, ptr %pa, align 1
     ret void
   }
-
+ 
+  define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) {
+    %va = load <vscale x 2 x i8>, ptr %pa, align 2
+    ret <vscale x 2 x i8> %va
+  }
+  
+  define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) {
+    %va = load <vscale x 4 x i8>, ptr %pa, align 4
+    ret <vscale x 4 x i8> %va
+  }
+  
+  define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) {
+    %va = load <vscale x 8 x i8>, ptr %pa, align 8
+    ret <vscale x 8 x i8> %va
+  }
+  
+  define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 16
+    ret <vscale x 16 x i8> %va
+  }
+  
 ...
 ---
 name:            vstore_nx1i8
@@ -28,3 +48,51 @@ body:             |
     PseudoRET
 
 ...
+---
+name:            vload_nx2i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx8i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx16i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...

>From 58b7094c82d0f20997c45c40401821c691901bae Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 16 Apr 2024 16:26:26 -0400
Subject: [PATCH 4/6] legalize all regular supported types from nxv1s8 to
 nxv8s64 without align info

---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 28 ++++++++++++++++---
 1 file changed, 24 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 1e3d6b12751af..246d481c1d8d9 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -265,13 +265,21 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
                                      {s32, p0, s16, 16},
                                      {s32, p0, s32, 32},
                                      {p0, p0, sXLen, XLen},
-                                     {nxv1s8, p0, nxv1s8, 8},
                                      {nxv2s8, p0, nxv2s8, 8},
                                      {nxv4s8, p0, nxv4s8, 8},
                                      {nxv8s8, p0, nxv8s8, 8},
-                                     {nxv16s8, p0, nxv16s8, 8}})
-          .widenScalarToNextPow2(0, /* MinSize = */ 8)
-          .lowerIfMemSizeNotByteSizePow2();
+                                     {nxv16s8, p0, nxv16s8, 8},
+                                     {nxv32s8, p0, nxv32s8, 8},
+                                     {nxv64s8, p0, nxv64s8, 8},
+                                     {nxv2s16, p0, nxv2s16, 16},
+                                     {nxv4s16, p0, nxv4s16, 16},
+                                     {nxv8s16, p0, nxv8s16, 16},
+                                     {nxv16s16, p0, nxv16s16, 16},
+                                     {nxv32s16, p0, nxv32s16, 16},
+                                     {nxv2s32, p0, nxv2s32, 32},
+                                     {nxv4s32, p0, nxv4s32, 32},
+                                     {nxv8s32, p0, nxv8s32, 32},
+                                     {nxv16s32, p0, nxv16s32, 32}});
 
   auto &ExtLoadActions =
       getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
@@ -286,6 +294,18 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
   } else if (ST.hasStdExtD()) {
     LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
   }
+  if (ST.getELen() == 64)
+    LoadStoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
+                                               {nxv1s16, p0, nxv1s16, 16},
+                                               {nxv1s32, p0, nxv1s32, 32}});
+  if (ST.hasVInstructionsI64())
+    LoadStoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
+                                               {nxv2s64, p0, nxv2s64, 64},
+                                               {nxv4s64, p0, nxv4s64, 64},
+                                               {nxv8s64, p0, nxv8s64, 64}});
+  LoadStoreActions.widenScalarToNextPow2(0, /* MinSize = */ 8)
+      .lowerIfMemSizeNotByteSizePow2();
+
   LoadStoreActions.clampScalar(0, s32, sXLen).lower();
   ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen).lower();
 

>From a258cedb23a7ce71f2a5137896497a430d30e20a Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 11 Jun 2024 11:18:07 -0400
Subject: [PATCH 5/6] legalize RVV loads for all LLTs and update all
 corresponding tests

---
 .../CodeGen/GlobalISel/LegalityPredicates.cpp |    3 +-
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |   58 +-
 .../Target/RISCV/GISel/RISCVLegalizerInfo.h   |    1 +
 .../legalizer/rvv/legalize-load.mir           | 1022 ++++++++++++++++-
 4 files changed, 1078 insertions(+), 6 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp b/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
index 2c77ed8b06008..8fe48195c610b 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
@@ -194,7 +194,8 @@ LegalityPredicate LegalityPredicates::memSizeNotByteSizePow2(unsigned MMOIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT MemTy = Query.MMODescrs[MMOIdx].MemoryTy;
     return !MemTy.isByteSized() ||
-           !llvm::has_single_bit<uint32_t>(MemTy.getSizeInBytes());
+           !llvm::has_single_bit<uint32_t>(
+               MemTy.getSizeInBytes().getKnownMinValue());
   };
 }
 
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 246d481c1d8d9..6c9838e998ffa 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -303,8 +303,10 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
                                                {nxv2s64, p0, nxv2s64, 64},
                                                {nxv4s64, p0, nxv4s64, 64},
                                                {nxv8s64, p0, nxv8s64, 64}});
+
   LoadStoreActions.widenScalarToNextPow2(0, /* MinSize = */ 8)
-      .lowerIfMemSizeNotByteSizePow2();
+      .lowerIfMemSizeNotByteSizePow2()
+      .custom();
 
   LoadStoreActions.clampScalar(0, s32, sXLen).lower();
   ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen).lower();
@@ -677,6 +679,57 @@ bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
   return true;
 }
 
+bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
+                                           MachineIRBuilder &MIB) const {
+  MachineRegisterInfo &MRI = *MIB.getMRI();
+  MachineFunction *MF = MI.getParent()->getParent();
+  const DataLayout &DL = MIB.getDataLayout();
+  LLVMContext &Ctx = MF->getFunction().getContext();
+
+  Register DstReg = MI.getOperand(0).getReg();
+  Register PtrReg = MI.getOperand(1).getReg();
+  LLT LoadTy = MRI.getType(DstReg);
+  assert(LoadTy.isVector() && "Expect vector load.");
+  assert(STI.hasVInstructions() &&
+         (LoadTy.getScalarSizeInBits() != 64 || STI.hasVInstructionsI64()) &&
+         (LoadTy.getElementCount().getKnownMinValue() != 1 ||
+          STI.getELen() == 64) &&
+         "Load type must be legal integer or floating point vector.");
+
+  assert(MI.hasOneMemOperand() &&
+         "Load instructions only have one MemOperand.");
+  Align Alignment = (*MI.memoperands_begin())->getAlign();
+  MachineMemOperand *LoadMMO = MF->getMachineMemOperand(
+      MachinePointerInfo(), MachineMemOperand::MOLoad, LoadTy, Alignment);
+
+  const auto *TLI = STI.getTargetLowering();
+  EVT VT = EVT::getEVT(getTypeForLLT(LoadTy, Ctx));
+
+  if (TLI->allowsMemoryAccessForAlignment(Ctx, DL, VT, *LoadMMO))
+    return true;
+
+  unsigned EltSizeBits = LoadTy.getScalarSizeInBits();
+  assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
+         "Unexpected unaligned RVV load type");
+
+  // Calculate the new vector type with i8 elements
+  unsigned NumElements =
+      LoadTy.getElementCount().getKnownMinValue() * (EltSizeBits / 8);
+  LLT NewLoadTy = LLT::scalable_vector(NumElements, 8);
+
+  MachinePointerInfo PI = cast<GLoad>(MI).getMMO().getPointerInfo();
+  MachineMemOperand *NewLoadMMO = MF->getMachineMemOperand(
+      PI, MachineMemOperand::MOLoad, NewLoadTy, Alignment);
+
+  auto NewLoad = MIB.buildLoad(NewLoadTy, PtrReg, *NewLoadMMO);
+
+  MIB.buildBitcast(DstReg, NewLoad);
+
+  MI.eraseFromParent();
+
+  return true;
+}
+
 /// Return the type of the mask type suitable for masking the provided
 /// vector type.  This is simply an i1 element type vector of the same
 /// (possibly scalable) length.
@@ -854,6 +907,9 @@ bool RISCVLegalizerInfo::legalizeCustom(
     return legalizeExt(MI, MIRBuilder);
   case TargetOpcode::G_SPLAT_VECTOR:
     return legalizeSplatVector(MI, MIRBuilder);
+  case TargetOpcode::G_LOAD:
+  case TargetOpcode::G_STORE:
+    return legalizeLoadStore(MI, MIRBuilder);
   }
 
   llvm_unreachable("expected switch to return");
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index 5bb1e7a728278..0415d042c913b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -45,6 +45,7 @@ class RISCVLegalizerInfo : public LegalizerInfo {
   bool legalizeVScale(MachineInstr &MI, MachineIRBuilder &MIB) const;
   bool legalizeExt(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
   bool legalizeSplatVector(MachineInstr &MI, MachineIRBuilder &MIB) const;
+  bool legalizeLoadStore(MachineInstr &MI, MachineIRBuilder &MIB) const;
 };
 } // end namespace llvm
 #endif
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
index 1b62f55520716..12f218863e400 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
@@ -3,19 +3,231 @@
 # RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
 --- |
 
-  define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) {
-    %va = load <vscale x 1 x i8>, ptr %pa
+  define <vscale x 1 x i8> @vload_nxv1i8(ptr %pa) #0 {
+    %va = load <vscale x 1 x i8>, ptr %pa, align 1
     ret <vscale x 1 x i8> %va
   }
 
+  define <vscale x 2 x i8> @vload_nxv2i8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i8>, ptr %pa, align 2
+    ret <vscale x 2 x i8> %va
+  }
+
+  define <vscale x 4 x i8> @vload_nxv4i8(ptr %pa) #0 {
+    %va = load <vscale x 4 x i8>, ptr %pa, align 4
+    ret <vscale x 4 x i8> %va
+  }
+
+  define <vscale x 8 x i8> @vload_nxv8i8(ptr %pa) #0 {
+    %va = load <vscale x 8 x i8>, ptr %pa, align 8
+    ret <vscale x 8 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nxv16i8(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 16
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 32 x i8> @vload_nxv32i8(ptr %pa) #0 {
+    %va = load <vscale x 32 x i8>, ptr %pa, align 32
+    ret <vscale x 32 x i8> %va
+  }
+
+  define <vscale x 64 x i8> @vload_nxv64i8(ptr %pa) #0 {
+    %va = load <vscale x 64 x i8>, ptr %pa, align 64
+    ret <vscale x 64 x i8> %va
+  }
+
+  define <vscale x 1 x i16> @vload_nxv1i16(ptr %pa) #0 {
+    %va = load <vscale x 1 x i16>, ptr %pa, align 2
+    ret <vscale x 1 x i16> %va
+  }
+
+  define <vscale x 2 x i16> @vload_nxv2i16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i16>, ptr %pa, align 4
+    ret <vscale x 2 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nxv4i16(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 8
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 8 x i16> @vload_nxv8i16(ptr %pa) #0 {
+    %va = load <vscale x 8 x i16>, ptr %pa, align 16
+    ret <vscale x 8 x i16> %va
+  }
+
+  define <vscale x 16 x i16> @vload_nxv16i16(ptr %pa) #0 {
+    %va = load <vscale x 16 x i16>, ptr %pa, align 32
+    ret <vscale x 16 x i16> %va
+  }
+
+  define <vscale x 32 x i16> @vload_nxv32i16(ptr %pa) #0 {
+    %va = load <vscale x 32 x i16>, ptr %pa, align 64
+    ret <vscale x 32 x i16> %va
+  }
+
+  define <vscale x 1 x i32> @vload_nxv1i32(ptr %pa) #0 {
+    %va = load <vscale x 1 x i32>, ptr %pa, align 4
+    ret <vscale x 1 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nxv2i32(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 8
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 4 x i32> @vload_nxv4i32(ptr %pa) #0 {
+    %va = load <vscale x 4 x i32>, ptr %pa, align 16
+    ret <vscale x 4 x i32> %va
+  }
+
+  define <vscale x 8 x i32> @vload_nxv8i32(ptr %pa) #0 {
+    %va = load <vscale x 8 x i32>, ptr %pa, align 32
+    ret <vscale x 8 x i32> %va
+  }
+
+  define <vscale x 16 x i32> @vload_nxv16i32(ptr %pa) #0 {
+    %va = load <vscale x 16 x i32>, ptr %pa, align 64
+    ret <vscale x 16 x i32> %va
+  }
+
+  define <vscale x 1 x i64> @vload_nxv1i64(ptr %pa) #0 {
+    %va = load <vscale x 1 x i64>, ptr %pa, align 8
+    ret <vscale x 1 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nxv2i64(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 16
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 4 x i64> @vload_nxv4i64(ptr %pa) #0 {
+    %va = load <vscale x 4 x i64>, ptr %pa, align 32
+    ret <vscale x 4 x i64> %va
+  }
+
+  define <vscale x 8 x i64> @vload_nxv8i64(ptr %pa) #0 {
+    %va = load <vscale x 8 x i64>, ptr %pa, align 64
+    ret <vscale x 8 x i64> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nxv16i8_align1(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 1
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nxv16i8_align2(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 2
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nxv16i8_align16(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 16
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nxv16i8_align64(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 64
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nxv4i16_align1(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 1
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nxv4i16_align2(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 2
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nxv4i16_align4(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 4
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nxv4i16_align8(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 8
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nxv4i16_align16(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 16
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nxv2i32_align2(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 2
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nxv2i32_align4(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 4
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nxv2i32_align8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 8
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nxv2i32_align16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 16
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nxv2i32_align256(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 256
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nxv2i64_align4(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 4
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nxv2i64_align8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 8
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nxv2i64_align16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 16
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nxv2i64_align32(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 32
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 1 x ptr> @vload_nxv1ptr(ptr %pa) #0 {
+    %va = load <vscale x 1 x ptr>, ptr %pa, align 4
+    ret <vscale x 1 x ptr> %va
+  }
+
+  define <vscale x 2 x ptr> @vload_nxv2ptr(ptr %pa) #0 {
+    %va = load <vscale x 2 x ptr>, ptr %pa, align 8
+    ret <vscale x 2 x ptr> %va
+  }
+
+  define <vscale x 8 x ptr> @vload_nxv8ptr(ptr %pa) #0 {
+    %va = load <vscale x 8 x ptr>, ptr %pa, align 32
+    ret <vscale x 8 x ptr> %va
+  }
+
+  attributes #0 = { "target-features"="+v" }
+
 ...
 ---
-name:            vload_nx1i8
+name:            vload_nxv1i8
 body:             |
   bb.1 (%ir-block.0):
     liveins: $x10
 
-    ; CHECK-LABEL: name: vload_nx1i8
+    ; CHECK-LABEL: name: vload_nxv1i8
     ; CHECK: liveins: $x10
     ; CHECK-NEXT: {{  $}}
     ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
@@ -27,3 +239,805 @@ body:             |
     $v8 = COPY %1(<vscale x 1 x s8>)
     PseudoRET implicit $v8
 
+...
+---
+name:            vload_nxv2i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv4i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv4i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv8i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv8i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv16i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv16i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nxv32i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv32i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nxv64i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv64i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nxv1i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv1i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv2i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv4i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv4i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv8i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv8i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nxv16i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv16i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nxv32i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv32i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nxv1i32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv1i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv2i32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv4i32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv4i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nxv8i32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv8i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nxv16i32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv16i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nxv1i64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv1i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv2i64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nxv4i64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv4i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nxv8i64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv8i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nxv16i8_align1
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv16i8_align1
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nxv16i8_align2
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv16i8_align2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nxv16i8_align16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv16i8_align16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nxv16i8_align64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv16i8_align64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nxv4i16_align1
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv4i16_align1
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 1)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv4i16_align2
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv4i16_align2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv4i16_align4
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv4i16_align4
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv4i16_align8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv4i16_align8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv4i16_align16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv4i16_align16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv2i32_align2
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2i32_align2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 2)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv2i32_align4
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2i32_align4
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv2i32_align8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2i32_align8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv2i32_align16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2i32_align16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv2i32_align256
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2i32_align256
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv2i64_align4
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2i64_align4
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 4)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nxv2i64_align8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2i64_align8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nxv2i64_align16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2i64_align16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nxv2i64_align32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2i64_align32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nxv1ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv1ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x p0>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv2ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv2ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x p0>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nxv8ptr
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nxv8ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 8 x p0>)
+    PseudoRET implicit $v8m4
+
+...

>From faeb4649bae144a1f56ba03f921e928161d8189a Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 25 Jun 2024 13:42:30 -0400
Subject: [PATCH 6/6] legalize store

---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |   41 +-
 .../legalizer/rvv/legalize-store.mir          | 1029 ++++++++++++++++-
 2 files changed, 1010 insertions(+), 60 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 6c9838e998ffa..1d5fa8961660e 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -19,6 +19,7 @@
 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
 #include "llvm/CodeGen/MachineConstantPool.h"
+#include "llvm/CodeGen/MachineMemOperand.h"
 #include "llvm/CodeGen/MachineRegisterInfo.h"
 #include "llvm/CodeGen/TargetOpcodes.h"
 #include "llvm/CodeGen/ValueTypes.h"
@@ -688,42 +689,46 @@ bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
 
   Register DstReg = MI.getOperand(0).getReg();
   Register PtrReg = MI.getOperand(1).getReg();
-  LLT LoadTy = MRI.getType(DstReg);
-  assert(LoadTy.isVector() && "Expect vector load.");
+  LLT DataTy = MRI.getType(DstReg);
+  assert(DataTy.isVector() && "Expect vector load.");
   assert(STI.hasVInstructions() &&
-         (LoadTy.getScalarSizeInBits() != 64 || STI.hasVInstructionsI64()) &&
-         (LoadTy.getElementCount().getKnownMinValue() != 1 ||
+         (DataTy.getScalarSizeInBits() != 64 || STI.hasVInstructionsI64()) &&
+         (DataTy.getElementCount().getKnownMinValue() != 1 ||
           STI.getELen() == 64) &&
          "Load type must be legal integer or floating point vector.");
 
   assert(MI.hasOneMemOperand() &&
          "Load instructions only have one MemOperand.");
-  Align Alignment = (*MI.memoperands_begin())->getAlign();
-  MachineMemOperand *LoadMMO = MF->getMachineMemOperand(
-      MachinePointerInfo(), MachineMemOperand::MOLoad, LoadTy, Alignment);
+  MachineMemOperand *MMO = *MI.memoperands_begin();
+  Align Alignment = MMO->getAlign();
 
   const auto *TLI = STI.getTargetLowering();
-  EVT VT = EVT::getEVT(getTypeForLLT(LoadTy, Ctx));
+  EVT VT = EVT::getEVT(getTypeForLLT(DataTy, Ctx));
 
-  if (TLI->allowsMemoryAccessForAlignment(Ctx, DL, VT, *LoadMMO))
+  if (TLI->allowsMemoryAccessForAlignment(Ctx, DL, VT, *MMO))
     return true;
 
-  unsigned EltSizeBits = LoadTy.getScalarSizeInBits();
+  unsigned EltSizeBits = DataTy.getScalarSizeInBits();
   assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
          "Unexpected unaligned RVV load type");
 
   // Calculate the new vector type with i8 elements
   unsigned NumElements =
-      LoadTy.getElementCount().getKnownMinValue() * (EltSizeBits / 8);
-  LLT NewLoadTy = LLT::scalable_vector(NumElements, 8);
+      DataTy.getElementCount().getKnownMinValue() * (EltSizeBits / 8);
+  LLT NewDataTy = LLT::scalable_vector(NumElements, 8);
 
-  MachinePointerInfo PI = cast<GLoad>(MI).getMMO().getPointerInfo();
-  MachineMemOperand *NewLoadMMO = MF->getMachineMemOperand(
-      PI, MachineMemOperand::MOLoad, NewLoadTy, Alignment);
+  MachinePointerInfo PI = MMO->getPointerInfo();
+  MachineMemOperand *NewMMO =
+      MF->getMachineMemOperand(PI, MMO->getFlags(), NewDataTy, Alignment);
 
-  auto NewLoad = MIB.buildLoad(NewLoadTy, PtrReg, *NewLoadMMO);
-
-  MIB.buildBitcast(DstReg, NewLoad);
+  if (isa<GLoad>(MI)) {
+    auto NewLoad = MIB.buildLoad(NewDataTy, PtrReg, *NewMMO);
+    MIB.buildBitcast(DstReg, NewLoad);
+  } else {
+    assert(isa<GStore>(MI) && "Machine instructions must be Load/Store.");
+    auto BitcastedData = MIB.buildBitcast(NewDataTy, DstReg);
+    MIB.buildStore(BitcastedData, PtrReg, *NewMMO);
+  }
 
   MI.eraseFromParent();
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
index 66df14b6635d3..317ddb4199802 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
@@ -3,31 +3,223 @@
 # RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
 --- |
 
-  define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) {
+  define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) #0 {
     store <vscale x 1 x i8> %b, ptr %pa, align 1
     ret void
   }
- 
-  define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) {
-    %va = load <vscale x 2 x i8>, ptr %pa, align 2
-    ret <vscale x 2 x i8> %va
+
+  define void @vstore_nx2i8(ptr %pa, <vscale x 2 x i8> %b) #0 {
+    store <vscale x 2 x i8> %b, ptr %pa, align 2
+    ret void
+  }
+
+  define void @vstore_nx4i8(ptr %pa, <vscale x 4 x i8> %b) #0 {
+    store <vscale x 4 x i8> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx8i8(ptr %pa, <vscale x 8 x i8> %b) #0 {
+    store <vscale x 8 x i8> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx16i8(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx32i8(ptr %pa, <vscale x 32 x i8> %b) #0 {
+    store <vscale x 32 x i8> %b, ptr %pa, align 32
+    ret void
+  }
+
+  define void @vstore_nx64i8(ptr %pa, <vscale x 64 x i8> %b) #0 {
+    store <vscale x 64 x i8> %b, ptr %pa, align 64
+    ret void
+  }
+
+  define void @vstore_nx1i16(ptr %pa, <vscale x 1 x i16> %b) #0 {
+    store <vscale x 1 x i16> %b, ptr %pa, align 2
+    ret void
+  }
+
+  define void @vstore_nx2i16(ptr %pa, <vscale x 2 x i16> %b) #0 {
+    store <vscale x 2 x i16> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx4i16(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx8i16(ptr %pa, <vscale x 8 x i16> %b) #0 {
+    store <vscale x 8 x i16> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx16i16(ptr %pa, <vscale x 16 x i16> %b) #0 {
+    store <vscale x 16 x i16> %b, ptr %pa, align 32
+    ret void
+  }
+
+  define void @vstore_nx32i16(ptr %pa, <vscale x 32 x i16> %b) #0 {
+    store <vscale x 32 x i16> %b, ptr %pa, align 64
+    ret void
+  }
+
+  define void @vstore_nx1i32(ptr %pa, <vscale x 1 x i32> %b) #0 {
+    store <vscale x 1 x i32> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx2i32(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx4i32(ptr %pa, <vscale x 4 x i32> %b) #0 {
+    store <vscale x 4 x i32> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx8i32(ptr %pa, <vscale x 8 x i32> %b) #0 {
+    store <vscale x 8 x i32> %b, ptr %pa, align 32
+    ret void
+  }
+
+  define void @vstore_nx16i32(ptr %pa, <vscale x 16 x i32> %b) #0 {
+    store <vscale x 16 x i32> %b, ptr %pa, align 64
+    ret void
+  }
+
+  define void @vstore_nx1i64(ptr %pa, <vscale x 1 x i64> %b) #0 {
+    store <vscale x 1 x i64> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx2i64(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx4i64(ptr %pa, <vscale x 4 x i64> %b) #0 {
+    store <vscale x 4 x i64> %b, ptr %pa, align 32
+    ret void
+  }
+
+  define void @vstore_nx8i64(ptr %pa, <vscale x 8 x i64> %b) #0 {
+    store <vscale x 8 x i64> %b, ptr %pa, align 64
+    ret void
+  }
+
+  define void @vstore_nx16i8_align1(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 1
+    ret void
+  }
+
+  define void @vstore_nx16i8_align2(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 2
+    ret void
+  }
+
+  define void @vstore_nx16i8_align16(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx16i8_align64(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 64
+    ret void
+  }
+
+  define void @vstore_nx4i16_align1(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 1
+    ret void
+  }
+
+  define void @vstore_nx4i16_align2(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 2
+    ret void
+  }
+
+  define void @vstore_nx4i16_align4(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx4i16_align8(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx4i16_align16(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx2i32_align2(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 2
+    ret void
+  }
+
+  define void @vstore_nx2i32_align4(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx2i32_align8(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx2i32_align16(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx2i32_align256(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 256
+    ret void
+  }
+
+  define void @vstore_nx2i64_align4(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 4
+    ret void
+  }
+
+  define void @vstore_nx2i64_align8(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 8
+    ret void
+  }
+
+  define void @vstore_nx2i64_align16(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 16
+    ret void
+  }
+
+  define void @vstore_nx2i64_align32(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 32
+    ret void
   }
-  
-  define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) {
-    %va = load <vscale x 4 x i8>, ptr %pa, align 4
-    ret <vscale x 4 x i8> %va
+
+  define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) #0 {
+    store <vscale x 1 x ptr> %b, ptr %pa, align 4
+    ret void
   }
-  
-  define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) {
-    %va = load <vscale x 8 x i8>, ptr %pa, align 8
-    ret <vscale x 8 x i8> %va
+
+  define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) #0 {
+    store <vscale x 2 x ptr> %b, ptr %pa, align 8
+    ret void
   }
-  
-  define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) {
-    %va = load <vscale x 16 x i8>, ptr %pa, align 16
-    ret <vscale x 16 x i8> %va
+
+  define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) #0 {
+    store <vscale x 8 x ptr> %b, ptr %pa, align 32
+    ret void
   }
-  
+
+  attributes #0 = { "target-features"="+v" }
+
 ...
 ---
 name:            vstore_nx1i8
@@ -49,50 +241,803 @@ body:             |
 
 ...
 ---
-name:            vload_nx2i8
+name:            vstore_nx2i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx2i8
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s8>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s8>), [[COPY]](p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s8>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s8>), %0(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx4i8
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s8>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s8>), [[COPY]](p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s8>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s8>), %0(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx8i8
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s8>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s8>) = COPY $v8
+    G_STORE %1(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: vstore_nx16i8
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx32i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+
+    ; CHECK-LABEL: name: vstore_nx32i8
+    ; CHECK: liveins: $x10, $v8m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s8>) = COPY $v8m4
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 32 x s8>), [[COPY]](p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 32 x s8>) = COPY $v8m4
+    G_STORE %1(<vscale x 32 x s8>), %0(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx64i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m8
+
+    ; CHECK-LABEL: name: vstore_nx64i8
+    ; CHECK: liveins: $x10, $v8m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 64 x s8>) = COPY $v8m8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 64 x s8>), [[COPY]](p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 64 x s8>) = COPY $v8m8
+    G_STORE %1(<vscale x 64 x s8>), %0(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx1i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx1i16
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s16>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s16>), [[COPY]](p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 1 x s16>), %0(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx2i16
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s16>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s16>), [[COPY]](p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s16>), %0(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx4i16
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8i16
 body:             |
   bb.1 (%ir-block.0):
-    liveins: $x10
-  
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: vstore_nx8i16
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s16>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s16>), [[COPY]](p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
-    %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
-    $v8 = COPY %1(<vscale x 2 x s8>)
-    PseudoRET implicit $v8
+    %1:_(<vscale x 8 x s16>) = COPY $v8m2
+    G_STORE %1(<vscale x 8 x s16>), %0(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+    PseudoRET
 
 ...
 ---
-name:            vload_nx4i8
+name:            vstore_nx16i16
 body:             |
   bb.1 (%ir-block.0):
-    liveins: $x10
-  
+    liveins: $x10, $v8m4
+
+    ; CHECK-LABEL: name: vstore_nx16i16
+    ; CHECK: liveins: $x10, $v8m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s16>) = COPY $v8m4
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s16>), [[COPY]](p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
-    %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
-    $v8 = COPY %1(<vscale x 4 x s8>)
-    PseudoRET implicit $v8
+    %1:_(<vscale x 16 x s16>) = COPY $v8m4
+    G_STORE %1(<vscale x 16 x s16>), %0(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+    PseudoRET
 
 ...
 ---
-name:            vload_nx8i8
+name:            vstore_nx32i16
 body:             |
   bb.1 (%ir-block.0):
-    liveins: $x10
-  
+    liveins: $x10, $v8m8
+
+    ; CHECK-LABEL: name: vstore_nx32i16
+    ; CHECK: liveins: $x10, $v8m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 32 x s16>) = COPY $v8m8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 32 x s16>), [[COPY]](p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
-    %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
-    $v8 = COPY %1(<vscale x 8 x s8>)
-    PseudoRET implicit $v8
+    %1:_(<vscale x 32 x s16>) = COPY $v8m8
+    G_STORE %1(<vscale x 32 x s16>), %0(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+    PseudoRET
 
 ...
 ---
-name:            vload_nx16i8
+name:            vstore_nx1i32
 body:             |
   bb.1 (%ir-block.0):
-    liveins: $x10
-  
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx1i32
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s32>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s32>), [[COPY]](p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
     %0:_(p0) = COPY $x10
-    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
-    $v8m2 = COPY %1(<vscale x 16 x s8>)
-    PseudoRET implicit $v8m2
+    %1:_(<vscale x 1 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 1 x s32>), %0(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx2i32
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: vstore_nx4i32
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s32>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s32>), [[COPY]](p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s32>) = COPY $v8m2
+    G_STORE %1(<vscale x 4 x s32>), %0(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8i32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+
+    ; CHECK-LABEL: name: vstore_nx8i32
+    ; CHECK: liveins: $x10, $v8m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s32>) = COPY $v8m4
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s32>), [[COPY]](p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s32>) = COPY $v8m4
+    G_STORE %1(<vscale x 8 x s32>), %0(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m8
+
+    ; CHECK-LABEL: name: vstore_nx16i32
+    ; CHECK: liveins: $x10, $v8m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s32>) = COPY $v8m8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s32>), [[COPY]](p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s32>) = COPY $v8m8
+    G_STORE %1(<vscale x 16 x s32>), %0(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx1i64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx1i64
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s64>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s64>), [[COPY]](p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s64>) = COPY $v8
+    G_STORE %1(<vscale x 1 x s64>), %0(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: vstore_nx2i64
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = COPY $v8m2
+    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+
+    ; CHECK-LABEL: name: vstore_nx4i64
+    ; CHECK: liveins: $x10, $v8m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s64>) = COPY $v8m4
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s64>), [[COPY]](p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s64>) = COPY $v8m4
+    G_STORE %1(<vscale x 4 x s64>), %0(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8i64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m8
+
+    ; CHECK-LABEL: name: vstore_nx8i64
+    ; CHECK: liveins: $x10, $v8m8
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x s64>) = COPY $v8m8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x s64>), [[COPY]](p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s64>) = COPY $v8m8
+    G_STORE %1(<vscale x 8 x s64>), %0(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8_align1
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: vstore_nx16i8_align1
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8_align2
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: vstore_nx16i8_align2
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8_align16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: vstore_nx16i8_align16
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8_align64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: vstore_nx16i8_align64
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 16 x s8>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align1
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx4i16_align1
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 1)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align2
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx4i16_align2
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align4
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx4i16_align4
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx4i16_align8
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx4i16_align16
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 4 x s16>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 4 x s16>), [[COPY]](p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align2
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx2i32_align2
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 8 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 8 x s8>), [[COPY]](p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 2)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align4
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx2i32_align4
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx2i32_align8
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx2i32_align16
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align256
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx2i32_align256
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s32>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s32>), [[COPY]](p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64_align4
+# Under-aligned store: align 4 is smaller than the s64 element size, so the
+# CHECK lines show the legalizer bitcasting <vscale x 2 x s64> to
+# <vscale x 16 x s8> and storing the byte vector instead (same align 4).
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: vstore_nx2i64_align4
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 16 x s8>) = G_BITCAST [[COPY1]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: G_STORE [[BITCAST]](<vscale x 16 x s8>), [[COPY]](p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = COPY $v8m2
+    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 4)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64_align8
+# Store of <vscale x 2 x s64> with align 8 (element-sized alignment): the
+# CHECK lines show the G_STORE survives legalization unchanged.
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: vstore_nx2i64_align8
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = COPY $v8m2
+    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64_align16
+# Store of <vscale x 2 x s64> with align 16: unchanged by legalization. The
+# CHECK MMO prints no explicit align, presumably because 16 matches the
+# type's natural alignment so the printer omits it.
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: vstore_nx2i64_align16
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = COPY $v8m2
+    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64_align32
+# Over-aligned store of <vscale x 2 x s64> (align 32): the CHECK lines show
+# the G_STORE passes through legalization unchanged.
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+
+    ; CHECK-LABEL: name: vstore_nx2i64_align32
+    ; CHECK: liveins: $x10, $v8m2
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x s64>) = COPY $v8m2
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x s64>), [[COPY]](p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = COPY $v8m2
+    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+    PseudoRET
+
+...
+---
+name:            vstore_nx1ptr
+# Store of a scalable vector of pointers, <vscale x 1 x p0>: the CHECK lines
+# show the G_STORE is legal as-is and is left unchanged.
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx1ptr
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x p0>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x p0>), [[COPY]](p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x p0>) = COPY $v8
+    G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2ptr
+# Store of <vscale x 2 x p0> (scalable pointer vector): the CHECK lines show
+# the G_STORE passes through legalization unchanged.
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx2ptr
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 2 x p0>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 2 x p0>), [[COPY]](p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x p0>) = COPY $v8
+    G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8ptr
+# Store of <vscale x 8 x p0>; the value arrives in the LMUL=4 register group
+# $v8m4. The CHECK lines show the G_STORE is legal and left unchanged.
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+
+    ; CHECK-LABEL: name: vstore_nx8ptr
+    ; CHECK: liveins: $x10, $v8m4
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 8 x p0>) = COPY $v8m4
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 8 x p0>), [[COPY]](p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x p0>) = COPY $v8m4
+    G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+    PseudoRET
 
 ...



More information about the llvm-commits mailing list