[llvm] [RISCV][GlobalISel] Legalize Scalable Vector Loads and Stores (PR #84965)

Jiahan Xie via llvm-commits llvm-commits at lists.llvm.org
Tue Jun 11 09:08:19 PDT 2024


https://github.com/jiahanxie353 updated https://github.com/llvm/llvm-project/pull/84965

From 82fef783a38df5919b4e287090cedec3a02b14fa Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Sun, 24 Mar 2024 20:11:14 -0400
Subject: [PATCH 1/5] use TypeSize for mem type since it can be a scalable
 vector

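LLT::getSizeInBits() and LLT::getSizeInBytes() return TypeSize, which
for a scalable vector carries a known minimum value plus a scalable
flag rather than a plain integer. Narrowing the result to unsigned
would silently drop the scalable flag (and TypeSize can assert on such
implicit conversions), so keep the values as TypeSize and take the
known minimum only where a scalar is genuinely required, as in the
bit_floor call below.

A rough sketch of the distinction, for illustration only (not part of
the patch):

  LLT Fixed = LLT::fixed_vector(4, 32);        // <4 x s32>
  LLT Scal = LLT::scalable_vector(4, 32);      // <vscale x 4 x s32>
  TypeSize FixedBits = Fixed.getSizeInBits();  // exactly 128 bits
  TypeSize ScalBits = Scal.getSizeInBits();    // vscale x 128 bits
  assert(ScalBits.isScalable());
  assert(ScalBits.getKnownMinValue() == 128);  // a minimum, not exact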
---
 llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
index 9830b521797c1..d8d26c9a6f2dd 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalizerHelper.cpp
@@ -3399,8 +3399,8 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerLoad(GAnyLoad &LoadMI) {
   LLT MemTy = MMO.getMemoryType();
   MachineFunction &MF = MIRBuilder.getMF();
 
-  unsigned MemSizeInBits = MemTy.getSizeInBits();
-  unsigned MemStoreSizeInBits = 8 * MemTy.getSizeInBytes();
+  TypeSize MemSizeInBits = MemTy.getSizeInBits();
+  TypeSize MemStoreSizeInBits = MemTy.getSizeInBytes().multiplyCoefficientBy(8);
 
   if (MemSizeInBits != MemStoreSizeInBits) {
     if (MemTy.isVector())
@@ -3464,7 +3464,7 @@ LegalizerHelper::LegalizeResult LegalizerHelper::lowerLoad(GAnyLoad &LoadMI) {
 
   if (!isPowerOf2_32(MemSizeInBits)) {
     // This load needs splitting into power of 2 sized loads.
-    LargeSplitSize = llvm::bit_floor(MemSizeInBits);
+    LargeSplitSize = llvm::bit_floor(MemSizeInBits.getKnownMinValue());
     SmallSplitSize = MemSizeInBits - LargeSplitSize;
   } else {
     // This is already a power of 2, but we still need to split this in half.

From 7519b7b034def4845173df4ec54979d5469cdcb3 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Mon, 25 Mar 2024 12:16:36 -0400
Subject: [PATCH 2/5] legalize vload and vstore of nxv1s8

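Two pieces are needed to push the first scalable vector load/store
through the legalizer: MIParser must accept machine memory operands
with a scalable memory type (the size is recorded as the known minimum
number of bytes), and the RISC-V rule set needs an entry for the type.
Each legalForTypesWithMemDesc entry reads {result type, pointer type,
memory type, minimum alignment in bits}, so the added rule expresses
(sketch):

  // A G_LOAD producing nxv1s8 from a p0 pointer, with an nxv1s8
  // memory type and at least 8-bit alignment, is legal as-is.
  {nxv1s8, p0, nxv1s8, /*MinAlign=*/8}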
---
 llvm/lib/CodeGen/MIRParser/MIParser.cpp       |  2 +-
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |  6 +++-
 .../legalizer/rvv/legalize-load.mir           | 29 ++++++++++++++++++
 .../legalizer/rvv/legalize-store.mir          | 30 +++++++++++++++++++
 4 files changed, 65 insertions(+), 2 deletions(-)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir

diff --git a/llvm/lib/CodeGen/MIRParser/MIParser.cpp b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
index 95924f0566283..1cbc4e408b86f 100644
--- a/llvm/lib/CodeGen/MIRParser/MIParser.cpp
+++ b/llvm/lib/CodeGen/MIRParser/MIParser.cpp
@@ -3421,7 +3421,7 @@ bool MIParser::parseMachineMemoryOperand(MachineMemOperand *&Dest) {
     if (expectAndConsume(MIToken::rparen))
       return true;
 
-    Size = MemoryType.getSizeInBytes();
+    Size = MemoryType.getSizeInBytes().getKnownMinValue();
   }
 
   MachinePointerInfo Ptr = MachinePointerInfo();
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 816e62ea24edb..a2a10b6453e47 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -264,7 +264,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
           .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                      {s32, p0, s16, 16},
                                      {s32, p0, s32, 32},
-                                     {p0, p0, sXLen, XLen}});
+                                     {p0, p0, sXLen, XLen},
+                                     {nxv1s8, p0, nxv1s8, 8}})
+          .widenScalarToNextPow2(0, /* MinSize = */ 8)
+          .lowerIfMemSizeNotByteSizePow2();
+
   auto &ExtLoadActions =
       getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
           .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}});
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
new file mode 100644
index 0000000000000..1b62f55520716
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
@@ -0,0 +1,29 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+  define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) {
+    %va = load <vscale x 1 x i8>, ptr %pa
+    ret <vscale x 1 x i8> %va
+  }
+
+...
+---
+name:            vload_nx1i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx1i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
new file mode 100644
index 0000000000000..b947861f7be50
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
@@ -0,0 +1,30 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
+--- |
+
+  define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) {
+    store <vscale x 1 x i8> %b, ptr %pa, align 1
+    ret void
+  }
+
+...
+---
+name:            vstore_nx1i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+
+    ; CHECK-LABEL: name: vstore_nx1i8
+    ; CHECK: liveins: $v8, $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[COPY1:%[0-9]+]]:_(<vscale x 1 x s8>) = COPY $v8
+    ; CHECK-NEXT: G_STORE [[COPY1]](<vscale x 1 x s8>), [[COPY]](p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+    ; CHECK-NEXT: PseudoRET
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s8>) = COPY $v8
+    G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+    PseudoRET
+
+...

From 93aa610319aebce1be839c507e37569f673cce8d Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Wed, 27 Mar 2024 14:32:04 -0400
Subject: [PATCH 3/5] add more tests for i8 element type

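Extend the single nxv1s8 entry to all power-of-two i8 element counts
from nxv2s8 through nxv16s8 and add MIR tests for each. Every added
case is already legal, so it should pass through the legalizer
unchanged, e.g. for nxv4s8:

  %0:_(p0) = COPY $x10
  %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
  $v8 = COPY %1(<vscale x 4 x s8>)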
---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |  6 +-
 .../legalizer/rvv/legalize-store.mir          | 70 ++++++++++++++++++-
 2 files changed, 74 insertions(+), 2 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index a2a10b6453e47..939213af380a6 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -265,7 +265,11 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
                                      {s32, p0, s16, 16},
                                      {s32, p0, s32, 32},
                                      {p0, p0, sXLen, XLen},
-                                     {nxv1s8, p0, nxv1s8, 8}})
+                                     {nxv1s8, p0, nxv1s8, 8},
+                                     {nxv2s8, p0, nxv2s8, 8},
+                                     {nxv4s8, p0, nxv4s8, 8},
+                                     {nxv8s8, p0, nxv8s8, 8},
+                                     {nxv16s8, p0, nxv16s8, 8}})
           .widenScalarToNextPow2(0, /* MinSize = */ 8)
           .lowerIfMemSizeNotByteSizePow2();
 
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
index b947861f7be50..66df14b6635d3 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-store.mir
@@ -7,7 +7,27 @@
     store <vscale x 1 x i8> %b, ptr %pa, align 1
     ret void
   }
-
+
+  define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) {
+    %va = load <vscale x 2 x i8>, ptr %pa, align 2
+    ret <vscale x 2 x i8> %va
+  }
+
+  define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) {
+    %va = load <vscale x 4 x i8>, ptr %pa, align 4
+    ret <vscale x 4 x i8> %va
+  }
+
+  define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) {
+    %va = load <vscale x 8 x i8>, ptr %pa, align 8
+    ret <vscale x 8 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 16
+    ret <vscale x 16 x i8> %va
+  }
+
 ...
 ---
 name:            vstore_nx1i8
@@ -28,3 +48,51 @@ body:             |
     PseudoRET
 
 ...
+---
+name:            vload_nx2i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx8i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx16i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...

From a53d111cd635065d13453308c78467077f630aee Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 16 Apr 2024 16:26:26 -0400
Subject: [PATCH 4/5] legalize all regularly supported types from nxv1s8 to
 nxv8s64 without alignment info

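Cover the remaining element types and gate the entries on what the
subtarget actually provides: the nxv1 variants of s8/s16/s32 map to
the smallest fractional-LMUL containers and are only added when ELEN
is 64, and the s64-element vectors are only added when the subtarget
has 64-bit vector integer instructions. Condensed, the gating looks
like:

  if (ST.getELen() == 64)
    LoadStoreActions.legalForTypesWithMemDesc(
        {{nxv1s8, p0, nxv1s8, 8},
         {nxv1s16, p0, nxv1s16, 16},
         {nxv1s32, p0, nxv1s32, 32}});
  if (ST.hasVInstructionsI64())
    LoadStoreActions.legalForTypesWithMemDesc(
        {{nxv1s64, p0, nxv1s64, 64} /* ... through nxv8s64 ... */});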
---
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp | 28 ++++++++++++++++---
 1 file changed, 24 insertions(+), 4 deletions(-)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index 939213af380a6..f020f96af5876 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -265,13 +265,21 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
                                      {s32, p0, s16, 16},
                                      {s32, p0, s32, 32},
                                      {p0, p0, sXLen, XLen},
-                                     {nxv1s8, p0, nxv1s8, 8},
                                      {nxv2s8, p0, nxv2s8, 8},
                                      {nxv4s8, p0, nxv4s8, 8},
                                      {nxv8s8, p0, nxv8s8, 8},
-                                     {nxv16s8, p0, nxv16s8, 8}})
-          .widenScalarToNextPow2(0, /* MinSize = */ 8)
-          .lowerIfMemSizeNotByteSizePow2();
+                                     {nxv16s8, p0, nxv16s8, 8},
+                                     {nxv32s8, p0, nxv32s8, 8},
+                                     {nxv64s8, p0, nxv64s8, 8},
+                                     {nxv2s16, p0, nxv2s16, 16},
+                                     {nxv4s16, p0, nxv4s16, 16},
+                                     {nxv8s16, p0, nxv8s16, 16},
+                                     {nxv16s16, p0, nxv16s16, 16},
+                                     {nxv32s16, p0, nxv32s16, 16},
+                                     {nxv2s32, p0, nxv2s32, 32},
+                                     {nxv4s32, p0, nxv4s32, 32},
+                                     {nxv8s32, p0, nxv8s32, 32},
+                                     {nxv16s32, p0, nxv16s32, 32}});
 
   auto &ExtLoadActions =
       getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
@@ -286,6 +294,18 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
   } else if (ST.hasStdExtD()) {
     LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
   }
+  if (ST.getELen() == 64)
+    LoadStoreActions.legalForTypesWithMemDesc({{nxv1s8, p0, nxv1s8, 8},
+                                               {nxv1s16, p0, nxv1s16, 16},
+                                               {nxv1s32, p0, nxv1s32, 32}});
+  if (ST.hasVInstructionsI64())
+    LoadStoreActions.legalForTypesWithMemDesc({{nxv1s64, p0, nxv1s64, 64},
+                                               {nxv2s64, p0, nxv2s64, 64},
+                                               {nxv4s64, p0, nxv4s64, 64},
+                                               {nxv8s64, p0, nxv8s64, 64}});
+  LoadStoreActions.widenScalarToNextPow2(0, /* MinSize = */ 8)
+      .lowerIfMemSizeNotByteSizePow2();
+
   LoadStoreActions.clampScalar(0, s32, sXLen).lower();
   ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen).lower();
 

From eee1b242c89cba3e16e93e566232d9f20d7f0008 Mon Sep 17 00:00:00 2001
From: jiahanxie353 <jx353 at cornell.edu>
Date: Tue, 11 Jun 2024 11:18:07 -0400
Subject: [PATCH 5/5] legalize RVV load for all LLTs and update all
 corresponding tests

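For accesses the target reports as unsupported at the given alignment
(via TLI->allowsMemoryAccessForAlignment), mark the operation custom
and lower it by loading the same number of bytes as a scalable vector
of s8 elements, then bitcasting back to the requested type, since byte
loads only need byte alignment. getTypeForLLT also learns to build a
ScalableVectorType so the EVT query works for scalable LLTs. Worked
example for an align-1 <vscale x 4 x s16> load (this is what the
vload_nx4i16_align1 test below checks):

  EltSizeBits = 16
  NumElements = 4 * (16 / 8) = 8          // same total byte count
  NewLoadTy   = <vscale x 8 x s8>

  %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>), align 1)
  %1:_(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)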
---
 .../CodeGen/GlobalISel/LegalityPredicates.cpp |    3 +-
 .../Target/RISCV/GISel/RISCVLegalizerInfo.cpp |   59 +-
 .../Target/RISCV/GISel/RISCVLegalizerInfo.h   |    1 +
 .../legalizer/rvv/legalize-load.mir           | 1024 ++++++++++++++++-
 4 files changed, 1082 insertions(+), 5 deletions(-)

diff --git a/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp b/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
index 2c77ed8b06008..8fe48195c610b 100644
--- a/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/LegalityPredicates.cpp
@@ -194,7 +194,8 @@ LegalityPredicate LegalityPredicates::memSizeNotByteSizePow2(unsigned MMOIdx) {
   return [=](const LegalityQuery &Query) {
     const LLT MemTy = Query.MMODescrs[MMOIdx].MemoryTy;
     return !MemTy.isByteSized() ||
-           !llvm::has_single_bit<uint32_t>(MemTy.getSizeInBytes());
+           !llvm::has_single_bit<uint32_t>(
+               MemTy.getSizeInBytes().getKnownMinValue());
   };
 }
 
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
index f020f96af5876..b683389e8b0c0 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.cpp
@@ -303,8 +303,10 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
                                                {nxv2s64, p0, nxv2s64, 64},
                                                {nxv4s64, p0, nxv4s64, 64},
                                                {nxv8s64, p0, nxv8s64, 64}});
+
   LoadStoreActions.widenScalarToNextPow2(0, /* MinSize = */ 8)
-      .lowerIfMemSizeNotByteSizePow2();
+      .lowerIfMemSizeNotByteSizePow2()
+      .custom();
 
   LoadStoreActions.clampScalar(0, s32, sXLen).lower();
   ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen).lower();
@@ -491,9 +493,14 @@ RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
 }
 
 static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
-  if (Ty.isVector())
+  if (Ty.isFixedVector())
     return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
                                 Ty.getNumElements());
+  if (Ty.isScalableVector())
+    return ScalableVectorType::get(
+        IntegerType::get(C, Ty.getScalarSizeInBits()),
+        Ty.getElementCount().getKnownMinValue());
+
   return IntegerType::get(C, Ty.getSizeInBits());
 }
 
@@ -676,6 +683,51 @@ bool RISCVLegalizerInfo::legalizeExt(MachineInstr &MI,
   return true;
 }
 
+bool RISCVLegalizerInfo::legalizeLoadStore(MachineInstr &MI,
+                                           MachineIRBuilder &MIB) const {
+  MachineRegisterInfo &MRI = *MIB.getMRI();
+  MachineFunction *MF = MI.getParent()->getParent();
+  const DataLayout &DL = MIB.getDataLayout();
+  LLVMContext &Ctx = MF->getFunction().getContext();
+
+  Register DstReg = MI.getOperand(0).getReg();
+  Register PtrReg = MI.getOperand(1).getReg();
+  LLT LoadTy = MRI.getType(DstReg);
+
+  assert(MI.hasOneMemOperand() &&
+         "Load instructions only have one MemOperand.");
+  Align Alignment = (*MI.memoperands_begin())->getAlign();
+  MachineMemOperand *LoadMMO = MF->getMachineMemOperand(
+      MachinePointerInfo(), MachineMemOperand::MOLoad, LoadTy, Alignment);
+
+  const auto *TLI = STI.getTargetLowering();
+  EVT VT = EVT::getEVT(getTypeForLLT(LoadTy, Ctx));
+
+  if (TLI->allowsMemoryAccessForAlignment(Ctx, DL, VT, *LoadMMO))
+    return true;
+
+  unsigned EltSizeBits = LoadTy.getScalarSizeInBits();
+  assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
+         "Unexpected unaligned RVV load type");
+
+  // Calculate the new vector type with i8 elements
+  unsigned NumElements =
+      LoadTy.getElementCount().getKnownMinValue() * (EltSizeBits / 8);
+  LLT NewLoadTy = LLT::scalable_vector(NumElements, 8);
+
+  DstOp NewDstReg(NewLoadTy);
+  MachineMemOperand *NewLoadMMO = MF->getMachineMemOperand(
+      MachinePointerInfo(), MachineMemOperand::MOLoad, NewLoadTy, Alignment);
+
+  auto NewLoad = MIB.buildLoad(NewDstReg, PtrReg, *NewLoadMMO);
+
+  MIB.buildBitcast(DstReg, NewLoad.getReg(0));
+
+  MI.eraseFromParent();
+
+  return true;
+}
+
 /// Return the type of the mask type suitable for masking the provided
 /// vector type.  This is simply an i1 element type vector of the same
 /// (possibly scalable) length.
@@ -853,6 +905,9 @@ bool RISCVLegalizerInfo::legalizeCustom(
     return legalizeExt(MI, MIRBuilder);
   case TargetOpcode::G_SPLAT_VECTOR:
     return legalizeSplatVector(MI, MIRBuilder);
+  case TargetOpcode::G_LOAD:
+  case TargetOpcode::G_STORE:
+    return legalizeLoadStore(MI, MIRBuilder);
   }
 
   llvm_unreachable("expected switch to return");
diff --git a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
index 5bb1e7a728278..0415d042c913b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
+++ b/llvm/lib/Target/RISCV/GISel/RISCVLegalizerInfo.h
@@ -45,6 +45,7 @@ class RISCVLegalizerInfo : public LegalizerInfo {
   bool legalizeVScale(MachineInstr &MI, MachineIRBuilder &MIB) const;
   bool legalizeExt(MachineInstr &MI, MachineIRBuilder &MIRBuilder) const;
   bool legalizeSplatVector(MachineInstr &MI, MachineIRBuilder &MIB) const;
+  bool legalizeLoadStore(MachineInstr &MI, MachineIRBuilder &MIB) const;
 };
 } // end namespace llvm
 #endif
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
index 1b62f55520716..2d294c8c26f17 100644
--- a/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/legalizer/rvv/legalize-load.mir
@@ -3,11 +3,223 @@
 # RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=legalizer %s -o - | FileCheck %s
 --- |
 
-  define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) {
-    %va = load <vscale x 1 x i8>, ptr %pa
+  define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) #0 {
+    %va = load <vscale x 1 x i8>, ptr %pa, align 1
     ret <vscale x 1 x i8> %va
   }
 
+  define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i8>, ptr %pa, align 2
+    ret <vscale x 2 x i8> %va
+  }
+
+  define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) #0 {
+    %va = load <vscale x 4 x i8>, ptr %pa, align 4
+    ret <vscale x 4 x i8> %va
+  }
+
+  define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) #0 {
+    %va = load <vscale x 8 x i8>, ptr %pa, align 8
+    ret <vscale x 8 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 16
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 32 x i8> @vload_nx32i8(ptr %pa) #0 {
+    %va = load <vscale x 32 x i8>, ptr %pa, align 32
+    ret <vscale x 32 x i8> %va
+  }
+
+  define <vscale x 64 x i8> @vload_nx64i8(ptr %pa) #0 {
+    %va = load <vscale x 64 x i8>, ptr %pa, align 64
+    ret <vscale x 64 x i8> %va
+  }
+
+  define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) #0 {
+    %va = load <vscale x 1 x i16>, ptr %pa, align 2
+    ret <vscale x 1 x i16> %va
+  }
+
+  define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i16>, ptr %pa, align 4
+    ret <vscale x 2 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 8
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) #0 {
+    %va = load <vscale x 8 x i16>, ptr %pa, align 16
+    ret <vscale x 8 x i16> %va
+  }
+
+  define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) #0 {
+    %va = load <vscale x 16 x i16>, ptr %pa, align 32
+    ret <vscale x 16 x i16> %va
+  }
+
+  define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) #0 {
+    %va = load <vscale x 32 x i16>, ptr %pa, align 64
+    ret <vscale x 32 x i16> %va
+  }
+
+  define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) #0 {
+    %va = load <vscale x 1 x i32>, ptr %pa, align 4
+    ret <vscale x 1 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 8
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) #0 {
+    %va = load <vscale x 4 x i32>, ptr %pa, align 16
+    ret <vscale x 4 x i32> %va
+  }
+
+  define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) #0 {
+    %va = load <vscale x 8 x i32>, ptr %pa, align 32
+    ret <vscale x 8 x i32> %va
+  }
+
+  define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) #0 {
+    %va = load <vscale x 16 x i32>, ptr %pa, align 64
+    ret <vscale x 16 x i32> %va
+  }
+
+  define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) #0 {
+    %va = load <vscale x 1 x i64>, ptr %pa, align 8
+    ret <vscale x 1 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 16
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) #0 {
+    %va = load <vscale x 4 x i64>, ptr %pa, align 32
+    ret <vscale x 4 x i64> %va
+  }
+
+  define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) #0 {
+    %va = load <vscale x 8 x i64>, ptr %pa, align 64
+    ret <vscale x 8 x i64> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8_align1(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 1
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8_align2(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 2
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8_align16(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 16
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8_align64(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 64
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align1(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 1
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align2(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 2
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align4(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 4
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align8(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 8
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align16(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 16
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align2(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 2
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align4(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 4
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 8
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 16
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align256(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 256
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64_align4(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 4
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64_align8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 8
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64_align16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 16
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64_align32(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 32
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 1 x ptr> @vload_nx1ptr(ptr %pa) #0 {
+    %va = load <vscale x 1 x ptr>, ptr %pa, align 4
+    ret <vscale x 1 x ptr> %va
+  }
+
+  define <vscale x 2 x ptr> @vload_nx2ptr(ptr %pa) #0 {
+    %va = load <vscale x 2 x ptr>, ptr %pa, align 8
+    ret <vscale x 2 x ptr> %va
+  }
+
+  define <vscale x 8 x ptr> @vload_nx8ptr(ptr %pa) #0 {
+    %va = load <vscale x 8 x ptr>, ptr %pa, align 32
+    ret <vscale x 8 x ptr> %va
+  }
+
+  attributes #0 = { "target-features"="+v" }
+
 ...
 ---
 name:            vload_nx1i8
@@ -27,3 +239,811 @@ body:             |
     $v8 = COPY %1(<vscale x 1 x s8>)
     PseudoRET implicit $v8
 
+...
+---
+name:            vload_nx2i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx2i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx4i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx8i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx8i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx16i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx16i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx32i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx32i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 32 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nx64i8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx64i8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 64 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 64 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nx1i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx1i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx2i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx4i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx8i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx8i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 8 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx16i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx16i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 16 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nx32i16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx32i16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 32 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 32 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nx1i32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx1i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx2i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx4i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 4 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx8i32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx8i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nx16i32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx16i32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 16 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nx1i64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx1i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx2i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx4i64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx4i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 4 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nx8i64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx8i64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8m8 = COPY [[LOAD]](<vscale x 8 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nx16i8_align1
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx16i8_align1
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx16i8_align2
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx16i8_align2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx16i8_align16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx16i8_align16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx16i8_align64
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx16i8_align64
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx4i16_align1
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx4i16_align1
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>), align 1)
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 4 x s16>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>), align 1)
+    %1:_(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16_align2
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx4i16_align2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16_align4
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx4i16_align4
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16_align8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx4i16_align8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16_align16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx4i16_align16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 4 x s16>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 4 x s16>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align2
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx2i32_align2
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x s8>), align 2)
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s32>) = G_BITCAST [[LOAD]](<vscale x 8 x s8>)
+    ; CHECK-NEXT: $v8 = COPY [[BITCAST]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %2:_(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>), align 2)
+    %1:_(<vscale x 2 x s32>) = G_BITCAST %2(<vscale x 8 x s8>)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align4
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx2i32_align4
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx2i32_align8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx2i32_align16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align256
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx2i32_align256
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s32>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s32>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i64_align4
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx2i64_align4
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 16 x s8>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 16 x s8>), align 4)
+    ; CHECK-NEXT: [[BITCAST:%[0-9]+]]:_(<vscale x 2 x s64>) = G_BITCAST [[LOAD]](<vscale x 16 x s8>)
+    ; CHECK-NEXT: $v8m2 = COPY [[BITCAST]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %2:_(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>), align 4)
+    %1:_(<vscale x 2 x s64>) = G_BITCAST %2(<vscale x 16 x s8>)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx2i64_align8
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx2i64_align8
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx2i64_align16
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx2i64_align16
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx2i64_align32
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx2i64_align32
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x s64>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+    ; CHECK-NEXT: $v8m2 = COPY [[LOAD]](<vscale x 2 x s64>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m2
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx1ptr
+alignment:       4
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx1ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 1 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x p0>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2ptr
+alignment:       4
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx2ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 2 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x p0>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx8ptr
+alignment:       4
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; CHECK-LABEL: name: vload_nx8ptr
+    ; CHECK: liveins: $x10
+    ; CHECK-NEXT: {{  $}}
+    ; CHECK-NEXT: [[COPY:%[0-9]+]]:_(p0) = COPY $x10
+    ; CHECK-NEXT: [[LOAD:%[0-9]+]]:_(<vscale x 8 x p0>) = G_LOAD [[COPY]](p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+    ; CHECK-NEXT: $v8m4 = COPY [[LOAD]](<vscale x 8 x p0>)
+    ; CHECK-NEXT: PseudoRET implicit $v8m4
+    %0:_(p0) = COPY $x10
+    %1:_(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 8 x p0>)
+    PseudoRET implicit $v8m4
+
+...


