[llvm] [RISC-V][GISEL] Select G_BITCAST for scalable vectors (PR #101486)

Jiahan Xie via llvm-commits llvm-commits at lists.llvm.org
Mon Sep 23 13:49:42 PDT 2024


https://github.com/jiahanxie353 updated https://github.com/llvm/llvm-project/pull/101486

>From cea2c3e277e90cb4528706240b0573cefb127e9a Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Thu, 1 Aug 2024 09:27:30 -0400
Subject: [PATCH 1/3] Select G_BITCAST as a copy for scalable vectors

---
 llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp | 1 +
 1 file changed, 1 insertion(+)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 92d00c26bd219c..16b0e7e0a56072 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -559,6 +559,7 @@ bool RISCVInstructionSelector::select(MachineInstr &MI) {
   case TargetOpcode::G_INTTOPTR:
   case TargetOpcode::G_TRUNC:
   case TargetOpcode::G_FREEZE:
+  case TargetOpcode::G_BITCAST:
     return selectCopy(MI, MRI);
   case TargetOpcode::G_CONSTANT: {
     Register DstReg = MI.getOperand(0).getReg();

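Selecting G_BITCAST through selectCopy is sound here because register-bank
selection has already placed both sides of a scalable-vector bitcast in the
vector bank: the bitcast only reinterprets the type, the bits in the
register do not move. A minimal sketch of the intended selection
(hypothetical MIR, not taken from the patch; assumes <vscale x 4 x s16> and
<vscale x 8 x s8> both map to the VR register class):

    %1:vrb(<vscale x 4 x s16>) = COPY $v8
    %2:vrb(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 4 x s16>)
    # after instruction selection the G_BITCAST should become a plain
    # register-class copy, which later passes can coalesce away:
    %2:vr = COPY %1
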
>From 7d9d4f211cbd0cabeb9b3ffa9f2656558818253c Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Mon, 23 Sep 2024 16:48:11 -0400
Subject: [PATCH 2/3] Add instruction-select tests for scalable vector loads and stores

---
 .../instruction-select/rvv/load.mir           | 869 ++++++++++++++++++
 .../instruction-select/rvv/store.mir          | 869 ++++++++++++++++++
 2 files changed, 1738 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/load.mir
 create mode 100644 llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/store.mir

diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/load.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/load.mir
new file mode 100644
index 00000000000000..55b210879e66ee
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/load.mir
@@ -0,0 +1,869 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
+--- |
+ 
+  define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) #0 {
+    %va = load <vscale x 1 x i8>, ptr %pa, align 1
+    ret <vscale x 1 x i8> %va
+  }
+  
+  define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i8>, ptr %pa, align 2
+    ret <vscale x 2 x i8> %va
+  }
+  
+  define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) #0 {
+    %va = load <vscale x 4 x i8>, ptr %pa, align 4
+    ret <vscale x 4 x i8> %va
+  }
+  
+  define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) #0 {
+    %va = load <vscale x 8 x i8>, ptr %pa, align 8
+    ret <vscale x 8 x i8> %va
+  }
+  
+  define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 16
+    ret <vscale x 16 x i8> %va
+  }
+  
+  define <vscale x 32 x i8> @vload_nx32i8(ptr %pa) #0 {
+    %va = load <vscale x 32 x i8>, ptr %pa, align 32
+    ret <vscale x 32 x i8> %va
+  }
+  
+  define <vscale x 64 x i8> @vload_nx64i8(ptr %pa) #0 {
+    %va = load <vscale x 64 x i8>, ptr %pa, align 64
+    ret <vscale x 64 x i8> %va
+  }
+  
+  define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) #0 {
+    %va = load <vscale x 1 x i16>, ptr %pa, align 2
+    ret <vscale x 1 x i16> %va
+  }
+  
+  define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i16>, ptr %pa, align 4
+    ret <vscale x 2 x i16> %va
+  }
+  
+  define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 8
+    ret <vscale x 4 x i16> %va
+  }
+  
+  define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) #0 {
+    %va = load <vscale x 8 x i16>, ptr %pa, align 16
+    ret <vscale x 8 x i16> %va
+  }
+  
+  define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) #0 {
+    %va = load <vscale x 16 x i16>, ptr %pa, align 32
+    ret <vscale x 16 x i16> %va
+  }
+  
+  define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) #0 {
+    %va = load <vscale x 32 x i16>, ptr %pa, align 64
+    ret <vscale x 32 x i16> %va
+  }
+  
+  define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) #0 {
+    %va = load <vscale x 1 x i32>, ptr %pa, align 4
+    ret <vscale x 1 x i32> %va
+  }
+  
+  define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 8
+    ret <vscale x 2 x i32> %va
+  }
+  
+  define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) #0 {
+    %va = load <vscale x 4 x i32>, ptr %pa, align 16
+    ret <vscale x 4 x i32> %va
+  }
+  
+  define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) #0 {
+    %va = load <vscale x 8 x i32>, ptr %pa, align 32
+    ret <vscale x 8 x i32> %va
+  }
+  
+  define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) #0 {
+    %va = load <vscale x 16 x i32>, ptr %pa, align 64
+    ret <vscale x 16 x i32> %va
+  }
+  
+  define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) #0 {
+    %va = load <vscale x 1 x i64>, ptr %pa, align 8
+    ret <vscale x 1 x i64> %va
+  }
+  
+  define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 16
+    ret <vscale x 2 x i64> %va
+  }
+  
+  define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) #0 {
+    %va = load <vscale x 4 x i64>, ptr %pa, align 32
+    ret <vscale x 4 x i64> %va
+  }
+  
+  define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) #0 {
+    %va = load <vscale x 8 x i64>, ptr %pa, align 64
+    ret <vscale x 8 x i64> %va
+  }
+  
+  define <vscale x 16 x i8> @vload_nx16i8_align1(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 1
+    ret <vscale x 16 x i8> %va
+  }
+  
+  define <vscale x 16 x i8> @vload_nx16i8_align2(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 2
+    ret <vscale x 16 x i8> %va
+  }
+  
+  define <vscale x 16 x i8> @vload_nx16i8_align16(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 16
+    ret <vscale x 16 x i8> %va
+  }
+  
+  define <vscale x 16 x i8> @vload_nx16i8_align64(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 64
+    ret <vscale x 16 x i8> %va
+  }
+  
+  define <vscale x 4 x i16> @vload_nx4i16_align1(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 1
+    ret <vscale x 4 x i16> %va
+  }
+  
+  define <vscale x 4 x i16> @vload_nx4i16_align2(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 2
+    ret <vscale x 4 x i16> %va
+  }
+  
+  define <vscale x 4 x i16> @vload_nx4i16_align4(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 4
+    ret <vscale x 4 x i16> %va
+  }
+  
+  define <vscale x 4 x i16> @vload_nx4i16_align8(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 8
+    ret <vscale x 4 x i16> %va
+  }
+  
+  define <vscale x 4 x i16> @vload_nx4i16_align16(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 16
+    ret <vscale x 4 x i16> %va
+  }
+  
+  define <vscale x 2 x i32> @vload_nx2i32_align2(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 2
+    ret <vscale x 2 x i32> %va
+  }
+  
+  define <vscale x 2 x i32> @vload_nx2i32_align4(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 4
+    ret <vscale x 2 x i32> %va
+  }
+  
+  define <vscale x 2 x i32> @vload_nx2i32_align8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 8
+    ret <vscale x 2 x i32> %va
+  }
+  
+  define <vscale x 2 x i32> @vload_nx2i32_align16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 16
+    ret <vscale x 2 x i32> %va
+  }
+  
+  define <vscale x 2 x i32> @vload_nx2i32_align256(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 256
+    ret <vscale x 2 x i32> %va
+  }
+  
+  define <vscale x 2 x i64> @vload_nx2i64_align4(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 4
+    ret <vscale x 2 x i64> %va
+  }
+  
+  define <vscale x 2 x i64> @vload_nx2i64_align8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 8
+    ret <vscale x 2 x i64> %va
+  }
+  
+  define <vscale x 2 x i64> @vload_nx2i64_align16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 16
+    ret <vscale x 2 x i64> %va
+  }
+  
+  define <vscale x 2 x i64> @vload_nx2i64_align32(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 32
+    ret <vscale x 2 x i64> %va
+  }
+  
+  define <vscale x 1 x ptr> @vload_nx1ptr(ptr %pa) #0 {
+    %va = load <vscale x 1 x ptr>, ptr %pa, align 4
+    ret <vscale x 1 x ptr> %va
+  }
+  
+  define <vscale x 2 x ptr> @vload_nx2ptr(ptr %pa) #0 {
+    %va = load <vscale x 2 x ptr>, ptr %pa, align 8
+    ret <vscale x 2 x ptr> %va
+  }
+  
+  define <vscale x 8 x ptr> @vload_nx8ptr(ptr %pa) #0 {
+    %va = load <vscale x 8 x ptr>, ptr %pa, align 32
+    ret <vscale x 8 x ptr> %va
+  }
+  
+...
+---
+name:            vload_nx1i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx8i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 8 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx16i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx32i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 32 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s8>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 32 x s8>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nx64i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 64 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 64 x s8>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 64 x s8>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nx1i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx8i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s16>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 8 x s16>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx16i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s16>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 16 x s16>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nx32i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 32 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 32 x s16>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 32 x s16>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nx1i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s32>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s32>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 4 x s32>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx8i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s32>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 8 x s32>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nx16i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s32>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 16 x s32>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nx1i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 1 x s64>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s64>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx4i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s64>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 4 x s64>)
+    PseudoRET implicit $v8m4
+
+...
+---
+name:            vload_nx8i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s64>) from %ir.pa)
+    $v8m8 = COPY %1(<vscale x 8 x s64>)
+    PseudoRET implicit $v8m8
+
+...
+---
+name:            vload_nx16i8_align1
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 1)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx16i8_align2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 2)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx16i8_align16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx16i8_align64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 64)
+    $v8m2 = COPY %1(<vscale x 16 x s8>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx4i16_align1
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
+    %1:vrb(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16_align2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 2)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16_align4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 4)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16_align8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx4i16_align16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = G_LOAD %0(p0) :: (load (<vscale x 4 x s16>) from %ir.pa, align 16)
+    $v8 = COPY %1(<vscale x 4 x s16>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 2)
+    %1:vrb(<vscale x 2 x s32>) = G_BITCAST %2(<vscale x 8 x s8>)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 4)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 16)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i32_align256
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s32>) from %ir.pa, align 256)
+    $v8 = COPY %1(<vscale x 2 x s32>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i64_align4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(<vscale x 16 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 16 x s8>) from %ir.pa, align 4)
+    %1:vrb(<vscale x 2 x s64>) = G_BITCAST %2(<vscale x 16 x s8>)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx2i64_align8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 8)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx2i64_align16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx2i64_align32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x s64>) from %ir.pa, align 32)
+    $v8m2 = COPY %1(<vscale x 2 x s64>)
+    PseudoRET implicit $v8m2
+
+...
+---
+name:            vload_nx1ptr
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 1 x p0>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x p0>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2ptr
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 2 x p0>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx8ptr
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 8 x p0>) from %ir.pa)
+    $v8m4 = COPY %1(<vscale x 8 x p0>)
+    PseudoRET implicit $v8m4
+
+...
diff --git a/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/store.mir b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/store.mir
new file mode 100644
index 00000000000000..05c00f672dea37
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/GlobalISel/instruction-select/rvv/store.mir
@@ -0,0 +1,869 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+v -run-pass=instruction-select -simplify-mir -verify-machineinstrs %s -o - | FileCheck -check-prefix=RV64I %s
+--- |
+ 
+  define void @vstore_nx1i8(ptr %pa, <vscale x 1 x i8> %b) #0 {
+    store <vscale x 1 x i8> %b, ptr %pa, align 1
+    ret void
+  }
+  
+  define void @vstore_nx2i8(ptr %pa, <vscale x 2 x i8> %b) #0 {
+    store <vscale x 2 x i8> %b, ptr %pa, align 2
+    ret void
+  }
+  
+  define void @vstore_nx4i8(ptr %pa, <vscale x 4 x i8> %b) #0 {
+    store <vscale x 4 x i8> %b, ptr %pa, align 4
+    ret void
+  }
+  
+  define void @vstore_nx8i8(ptr %pa, <vscale x 8 x i8> %b) #0 {
+    store <vscale x 8 x i8> %b, ptr %pa, align 8
+    ret void
+  }
+  
+  define void @vstore_nx16i8(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 16
+    ret void
+  }
+  
+  define void @vstore_nx32i8(ptr %pa, <vscale x 32 x i8> %b) #0 {
+    store <vscale x 32 x i8> %b, ptr %pa, align 32
+    ret void
+  }
+  
+  define void @vstore_nx64i8(ptr %pa, <vscale x 64 x i8> %b) #0 {
+    store <vscale x 64 x i8> %b, ptr %pa, align 64
+    ret void
+  }
+  
+  define void @vstore_nx1i16(ptr %pa, <vscale x 1 x i16> %b) #0 {
+    store <vscale x 1 x i16> %b, ptr %pa, align 2
+    ret void
+  }
+  
+  define void @vstore_nx2i16(ptr %pa, <vscale x 2 x i16> %b) #0 {
+    store <vscale x 2 x i16> %b, ptr %pa, align 4
+    ret void
+  }
+  
+  define void @vstore_nx4i16(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 8
+    ret void
+  }
+  
+  define void @vstore_nx8i16(ptr %pa, <vscale x 8 x i16> %b) #0 {
+    store <vscale x 8 x i16> %b, ptr %pa, align 16
+    ret void
+  }
+  
+  define void @vstore_nx16i16(ptr %pa, <vscale x 16 x i16> %b) #0 {
+    store <vscale x 16 x i16> %b, ptr %pa, align 32
+    ret void
+  }
+  
+  define void @vstore_nx32i16(ptr %pa, <vscale x 32 x i16> %b) #0 {
+    store <vscale x 32 x i16> %b, ptr %pa, align 64
+    ret void
+  }
+  
+  define void @vstore_nx1i32(ptr %pa, <vscale x 1 x i32> %b) #0 {
+    store <vscale x 1 x i32> %b, ptr %pa, align 4
+    ret void
+  }
+  
+  define void @vstore_nx2i32(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 8
+    ret void
+  }
+  
+  define void @vstore_nx4i32(ptr %pa, <vscale x 4 x i32> %b) #0 {
+    store <vscale x 4 x i32> %b, ptr %pa, align 16
+    ret void
+  }
+  
+  define void @vstore_nx8i32(ptr %pa, <vscale x 8 x i32> %b) #0 {
+    store <vscale x 8 x i32> %b, ptr %pa, align 32
+    ret void
+  }
+  
+  define void @vstore_nx16i32(ptr %pa, <vscale x 16 x i32> %b) #0 {
+    store <vscale x 16 x i32> %b, ptr %pa, align 64
+    ret void
+  }
+  
+  define void @vstore_nx1i64(ptr %pa, <vscale x 1 x i64> %b) #0 {
+    store <vscale x 1 x i64> %b, ptr %pa, align 8
+    ret void
+  }
+  
+  define void @vstore_nx2i64(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 16
+    ret void
+  }
+  
+  define void @vstore_nx4i64(ptr %pa, <vscale x 4 x i64> %b) #0 {
+    store <vscale x 4 x i64> %b, ptr %pa, align 32
+    ret void
+  }
+  
+  define void @vstore_nx8i64(ptr %pa, <vscale x 8 x i64> %b) #0 {
+    store <vscale x 8 x i64> %b, ptr %pa, align 64
+    ret void
+  }
+  
+  define void @vstore_nx16i8_align1(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 1
+    ret void
+  }
+  
+  define void @vstore_nx16i8_align2(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 2
+    ret void
+  }
+  
+  define void @vstore_nx16i8_align16(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 16
+    ret void
+  }
+  
+  define void @vstore_nx16i8_align64(ptr %pa, <vscale x 16 x i8> %b) #0 {
+    store <vscale x 16 x i8> %b, ptr %pa, align 64
+    ret void
+  }
+  
+  define void @vstore_nx4i16_align1(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 1
+    ret void
+  }
+  
+  define void @vstore_nx4i16_align2(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 2
+    ret void
+  }
+  
+  define void @vstore_nx4i16_align4(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 4
+    ret void
+  }
+  
+  define void @vstore_nx4i16_align8(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 8
+    ret void
+  }
+  
+  define void @vstore_nx4i16_align16(ptr %pa, <vscale x 4 x i16> %b) #0 {
+    store <vscale x 4 x i16> %b, ptr %pa, align 16
+    ret void
+  }
+  
+  define void @vstore_nx2i32_align2(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 2
+    ret void
+  }
+  
+  define void @vstore_nx2i32_align4(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 4
+    ret void
+  }
+  
+  define void @vstore_nx2i32_align8(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 8
+    ret void
+  }
+  
+  define void @vstore_nx2i32_align16(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 16
+    ret void
+  }
+  
+  define void @vstore_nx2i32_align256(ptr %pa, <vscale x 2 x i32> %b) #0 {
+    store <vscale x 2 x i32> %b, ptr %pa, align 256
+    ret void
+  }
+  
+  define void @vstore_nx2i64_align4(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 4
+    ret void
+  }
+  
+  define void @vstore_nx2i64_align8(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 8
+    ret void
+  }
+  
+  define void @vstore_nx2i64_align16(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 16
+    ret void
+  }
+  
+  define void @vstore_nx2i64_align32(ptr %pa, <vscale x 2 x i64> %b) #0 {
+    store <vscale x 2 x i64> %b, ptr %pa, align 32
+    ret void
+  }
+  
+  define void @vstore_nx1ptr(ptr %pa, <vscale x 1 x ptr> %b) #0 {
+    store <vscale x 1 x ptr> %b, ptr %pa, align 4
+    ret void
+  }
+  
+  define void @vstore_nx2ptr(ptr %pa, <vscale x 2 x ptr> %b) #0 {
+    store <vscale x 2 x ptr> %b, ptr %pa, align 8
+    ret void
+  }
+  
+  define void @vstore_nx8ptr(ptr %pa, <vscale x 8 x ptr> %b) #0 {
+    store <vscale x 8 x ptr> %b, ptr %pa, align 32
+    ret void
+  }
+  
+...
+---
+name:            vstore_nx1i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x s8>) = COPY $v8
+    G_STORE %1(<vscale x 1 x s8>), %0(p0) :: (store (<vscale x 1 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s8>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s8>), %0(p0) :: (store (<vscale x 2 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s8>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s8>), %0(p0) :: (store (<vscale x 4 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x s8>) = COPY $v8
+    G_STORE %1(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx32i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 32 x s8>) = COPY $v8m4
+    G_STORE %1(<vscale x 32 x s8>), %0(p0) :: (store (<vscale x 32 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx64i8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m8
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 64 x s8>) = COPY $v8m8
+    G_STORE %1(<vscale x 64 x s8>), %0(p0) :: (store (<vscale x 64 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx1i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 1 x s16>), %0(p0) :: (store (<vscale x 1 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s16>), %0(p0) :: (store (<vscale x 2 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x s16>) = COPY $v8m2
+    G_STORE %1(<vscale x 8 x s16>), %0(p0) :: (store (<vscale x 8 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s16>) = COPY $v8m4
+    G_STORE %1(<vscale x 16 x s16>), %0(p0) :: (store (<vscale x 16 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx32i16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m8
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 32 x s16>) = COPY $v8m8
+    G_STORE %1(<vscale x 32 x s16>), %0(p0) :: (store (<vscale x 32 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx1i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 1 x s32>), %0(p0) :: (store (<vscale x 1 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s32>) = COPY $v8m2
+    G_STORE %1(<vscale x 4 x s32>), %0(p0) :: (store (<vscale x 4 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x s32>) = COPY $v8m4
+    G_STORE %1(<vscale x 8 x s32>), %0(p0) :: (store (<vscale x 8 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m8
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s32>) = COPY $v8m8
+    G_STORE %1(<vscale x 16 x s32>), %0(p0) :: (store (<vscale x 16 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx1i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x s64>) = COPY $v8
+    G_STORE %1(<vscale x 1 x s64>), %0(p0) :: (store (<vscale x 1 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s64>) = COPY $v8m4
+    G_STORE %1(<vscale x 4 x s64>), %0(p0) :: (store (<vscale x 4 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8i64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m8
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x s64>) = COPY $v8m8
+    G_STORE %1(<vscale x 8 x s64>), %0(p0) :: (store (<vscale x 8 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8_align1
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 1)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8_align2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 2)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8_align16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx16i8_align64
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 16 x s8>) = COPY $v8m2
+    G_STORE %1(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 64)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align1
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $v8
+    %2:vrb(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 4 x s16>)
+    G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 1)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 2)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 4)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx4i16_align16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 4 x s16>) = COPY $v8
+    G_STORE %1(<vscale x 4 x s16>), %0(p0) :: (store (<vscale x 4 x s16>) into %ir.pa, align 16)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align2
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $v8
+    %2:vrb(<vscale x 8 x s8>) = G_BITCAST %1(<vscale x 2 x s32>)
+    G_STORE %2(<vscale x 8 x s8>), %0(p0) :: (store (<vscale x 8 x s8>) into %ir.pa, align 2)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 4)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 16)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i32_align256
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s32>) = COPY $v8
+    G_STORE %1(<vscale x 2 x s32>), %0(p0) :: (store (<vscale x 2 x s32>) into %ir.pa, align 256)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64_align4
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    %2:vrb(<vscale x 16 x s8>) = G_BITCAST %1(<vscale x 2 x s64>)
+    G_STORE %2(<vscale x 16 x s8>), %0(p0) :: (store (<vscale x 16 x s8>) into %ir.pa, align 4)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64_align8
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 8)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64_align16
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2i64_align32
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m2
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x s64>) = COPY $v8m2
+    G_STORE %1(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x s64>) into %ir.pa, align 32)
+    PseudoRET
+
+...
+---
+name:            vstore_nx1ptr
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 1 x p0>) = COPY $v8
+    G_STORE %1(<vscale x 1 x p0>), %0(p0) :: (store (<vscale x 1 x p0>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx2ptr
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $v8, $x10
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 2 x p0>) = COPY $v8
+    G_STORE %1(<vscale x 2 x p0>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)
+    PseudoRET
+
+...
+---
+name:            vstore_nx8ptr
+legalized:       true
+regBankSelected: true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10, $v8m4
+  
+    %0:gprb(p0) = COPY $x10
+    %1:vrb(<vscale x 8 x p0>) = COPY $v8m4
+    G_STORE %1(<vscale x 8 x p0>), %0(p0) :: (store (<vscale x 8 x p0>) into %ir.pa)
+    PseudoRET
+
+...

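The under-aligned cases above are where the new G_BITCAST handling is
actually exercised: the legalizer has already split, for example, an
align-1 load of <vscale x 4 x s16> into a byte-element load plus a
G_BITCAST back to the original type (see vload_nx4i16_align1), and the
stores mirror this with a G_BITCAST ahead of a byte-element store. Roughly,
what instruction selection is expected to do with such a pair:

    %2:vrb(<vscale x 8 x s8>) = G_LOAD %0(p0) :: (load (<vscale x 8 x s8>) from %ir.pa, align 1)
    %1:vrb(<vscale x 4 x s16>) = G_BITCAST %2(<vscale x 8 x s8>)
    # expected after this patch: the bitcast selects to a register copy
    %1:vr = COPY %2
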
>From 907dbb3d9c8dc28590abdf1ff9a808ea64789fae Mon Sep 17 00:00:00 2001
From: Jiahan Xie <jx353 at cornell.edu>
Date: Mon, 23 Sep 2024 16:48:48 -0400
Subject: [PATCH 3/3] Lower loads/stores of pointer vectors to XLen integer
 vectors before instruction selection

---
 .../RISCV/GISel/RISCVInstructionSelector.cpp  | 20 +++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
index 16b0e7e0a56072..1b55d03947d31b 100644
--- a/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
+++ b/llvm/lib/Target/RISCV/GISel/RISCVInstructionSelector.cpp
@@ -786,6 +786,26 @@ void RISCVInstructionSelector::preISelLower(MachineInstr &MI,
     replacePtrWithInt(MI.getOperand(1), MIB, MRI);
     MI.setDesc(TII.get(TargetOpcode::G_AND));
     MRI.setType(DstReg, sXLen);
+    break;
+  }
+  case TargetOpcode::G_LOAD: {
+    Register DstReg = MI.getOperand(0).getReg();
+    const LLT DstTy = MRI.getType(DstReg);
+    if (!(DstTy.isVector() && DstTy.getElementType().isPointer()))
+      break;
+    const LLT sXLen = LLT::scalar(STI.getXLen());
+    MRI.setType(DstReg, LLT::scalable_vector(DstTy.getElementCount().getKnownMinValue(), sXLen));
+    break;
+  }
+  case TargetOpcode::G_STORE: {
+    MachineOperand &SrcOp = MI.getOperand(0);
+    const LLT SrcTy = MRI.getType(SrcOp.getReg());
+    if (!(SrcTy.isVector() && SrcTy.getElementType().isPointer()))
+      break;
+    const LLT sXLen = LLT::scalar(STI.getXLen());
+    auto Copy = MIB.buildCopy(LLT::scalable_vector(SrcTy.getElementCount().getKnownMinValue(), sXLen), SrcOp);
+    Register NewSrc = Copy.getReg(0);
+    SrcOp.setReg(NewSrc);
   }
   }
 }

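preISelLower runs on each generic instruction immediately before selection,
which makes it a convenient place to rewrite vector-of-pointer types, for
which there are no selection patterns, into equivalent XLen-integer
vectors. For a load the destination register is retyped in place; for a
store, a fresh integer-typed vreg is copied from the pointer-typed source
so that the G_STORE operand carries an integer vector type. A rough
illustration on riscv64 (XLen = 64; hypothetical MIR, not taken from the
tests above):

    # before preISelLower:
    %1:vrb(<vscale x 2 x p0>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)
    # after: the same vreg, retyped to s64 elements; the memory operand
    # still records the original pointer type:
    %1:vrb(<vscale x 2 x s64>) = G_LOAD %0(p0) :: (load (<vscale x 2 x p0>) from %ir.pa)

    # for a store, a COPY is inserted in front of the G_STORE instead:
    %1:vrb(<vscale x 2 x p0>) = COPY $v8
    %2:vrb(<vscale x 2 x s64>) = COPY %1(<vscale x 2 x p0>)
    G_STORE %2(<vscale x 2 x s64>), %0(p0) :: (store (<vscale x 2 x p0>) into %ir.pa)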

