[llvm] [GISEL][RISCV] RegBank Scalable Vector Load/Store (PR #99932)

Jiahan Xie via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 30 11:32:44 PDT 2024


================
@@ -0,0 +1,1612 @@
+# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
+# RUN: llc -mtriple=riscv32 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefix=RV32I %s
+# RUN: llc -mtriple=riscv64 -mattr=+m,+v -run-pass=regbankselect \
+# RUN:   -disable-gisel-legality-check -simplify-mir -verify-machineinstrs %s \
+# RUN:   -o - | FileCheck -check-prefix=RV64I %s
+--- |
+
+  define <vscale x 1 x i8> @vload_nx1i8(ptr %pa) #0 {
+    %va = load <vscale x 1 x i8>, ptr %pa, align 1
+    ret <vscale x 1 x i8> %va
+  }
+
+  define <vscale x 2 x i8> @vload_nx2i8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i8>, ptr %pa, align 2
+    ret <vscale x 2 x i8> %va
+  }
+
+  define <vscale x 4 x i8> @vload_nx4i8(ptr %pa) #0 {
+    %va = load <vscale x 4 x i8>, ptr %pa, align 4
+    ret <vscale x 4 x i8> %va
+  }
+
+  define <vscale x 8 x i8> @vload_nx8i8(ptr %pa) #0 {
+    %va = load <vscale x 8 x i8>, ptr %pa, align 8
+    ret <vscale x 8 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 16
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 32 x i8> @vload_nx32i8(ptr %pa) #0 {
+    %va = load <vscale x 32 x i8>, ptr %pa, align 32
+    ret <vscale x 32 x i8> %va
+  }
+
+  define <vscale x 64 x i8> @vload_nx64i8(ptr %pa) #0 {
+    %va = load <vscale x 64 x i8>, ptr %pa, align 64
+    ret <vscale x 64 x i8> %va
+  }
+
+  define <vscale x 1 x i16> @vload_nx1i16(ptr %pa) #0 {
+    %va = load <vscale x 1 x i16>, ptr %pa, align 2
+    ret <vscale x 1 x i16> %va
+  }
+
+  define <vscale x 2 x i16> @vload_nx2i16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i16>, ptr %pa, align 4
+    ret <vscale x 2 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 8
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 8 x i16> @vload_nx8i16(ptr %pa) #0 {
+    %va = load <vscale x 8 x i16>, ptr %pa, align 16
+    ret <vscale x 8 x i16> %va
+  }
+
+  define <vscale x 16 x i16> @vload_nx16i16(ptr %pa) #0 {
+    %va = load <vscale x 16 x i16>, ptr %pa, align 32
+    ret <vscale x 16 x i16> %va
+  }
+
+  define <vscale x 32 x i16> @vload_nx32i16(ptr %pa) #0 {
+    %va = load <vscale x 32 x i16>, ptr %pa, align 64
+    ret <vscale x 32 x i16> %va
+  }
+
+  define <vscale x 1 x i32> @vload_nx1i32(ptr %pa) #0 {
+    %va = load <vscale x 1 x i32>, ptr %pa, align 4
+    ret <vscale x 1 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 8
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 4 x i32> @vload_nx4i32(ptr %pa) #0 {
+    %va = load <vscale x 4 x i32>, ptr %pa, align 16
+    ret <vscale x 4 x i32> %va
+  }
+
+  define <vscale x 8 x i32> @vload_nx8i32(ptr %pa) #0 {
+    %va = load <vscale x 8 x i32>, ptr %pa, align 32
+    ret <vscale x 8 x i32> %va
+  }
+
+  define <vscale x 16 x i32> @vload_nx16i32(ptr %pa) #0 {
+    %va = load <vscale x 16 x i32>, ptr %pa, align 64
+    ret <vscale x 16 x i32> %va
+  }
+
+  define <vscale x 1 x i64> @vload_nx1i64(ptr %pa) #0 {
+    %va = load <vscale x 1 x i64>, ptr %pa, align 8
+    ret <vscale x 1 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 16
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 4 x i64> @vload_nx4i64(ptr %pa) #0 {
+    %va = load <vscale x 4 x i64>, ptr %pa, align 32
+    ret <vscale x 4 x i64> %va
+  }
+
+  define <vscale x 8 x i64> @vload_nx8i64(ptr %pa) #0 {
+    %va = load <vscale x 8 x i64>, ptr %pa, align 64
+    ret <vscale x 8 x i64> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8_align1(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 1
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8_align2(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 2
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8_align16(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 16
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 16 x i8> @vload_nx16i8_align64(ptr %pa) #0 {
+    %va = load <vscale x 16 x i8>, ptr %pa, align 64
+    ret <vscale x 16 x i8> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align1(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 1
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align2(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 2
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align4(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 4
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align8(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 8
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 4 x i16> @vload_nx4i16_align16(ptr %pa) #0 {
+    %va = load <vscale x 4 x i16>, ptr %pa, align 16
+    ret <vscale x 4 x i16> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align2(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 2
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align4(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 4
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 8
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 16
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i32> @vload_nx2i32_align256(ptr %pa) #0 {
+    %va = load <vscale x 2 x i32>, ptr %pa, align 256
+    ret <vscale x 2 x i32> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64_align4(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 4
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64_align8(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 8
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64_align16(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 16
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 2 x i64> @vload_nx2i64_align32(ptr %pa) #0 {
+    %va = load <vscale x 2 x i64>, ptr %pa, align 32
+    ret <vscale x 2 x i64> %va
+  }
+
+  define <vscale x 1 x ptr> @vload_nx1ptr(ptr %pa) #0 {
+    %va = load <vscale x 1 x ptr>, ptr %pa, align 4
+    ret <vscale x 1 x ptr> %va
+  }
+
+  define <vscale x 2 x ptr> @vload_nx2ptr(ptr %pa) #0 {
+    %va = load <vscale x 2 x ptr>, ptr %pa, align 8
+    ret <vscale x 2 x ptr> %va
+  }
+
+  define <vscale x 8 x ptr> @vload_nx8ptr(ptr %pa) #0 {
+    %va = load <vscale x 8 x ptr>, ptr %pa, align 32
+    ret <vscale x 8 x ptr> %va
+  }
+
+  attributes #0 = { "target-features"="+v" }
+
+...
+---
+name:            vload_nx1i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx1i8
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx1i8
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 1 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 1 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
+    %1:vrb(<vscale x 1 x s8>) = G_LOAD %2(p0) :: (load (<vscale x 1 x s8>) from %ir.pa)
+    $v8 = COPY %1(<vscale x 1 x s8>)
+    PseudoRET implicit $v8
+
+...
+---
+name:            vload_nx2i8
+legalized:       true
+tracksRegLiveness: true
+body:             |
+  bb.1 (%ir-block.0):
+    liveins: $x10
+
+    ; RV32I-LABEL: name: vload_nx2i8
+    ; RV32I: liveins: $x10
+    ; RV32I-NEXT: {{  $}}
+    ; RV32I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV32I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV32I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    ; RV32I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+    ; RV32I-NEXT: PseudoRET implicit $v8
+    ;
+    ; RV64I-LABEL: name: vload_nx2i8
+    ; RV64I: liveins: $x10
+    ; RV64I-NEXT: {{  $}}
+    ; RV64I-NEXT: [[COPY:%[0-9]+]]:gprb(p0) = COPY $x10
+    ; RV64I-NEXT: [[COPY1:%[0-9]+]]:vrb(p0) = COPY [[COPY]](p0)
+    ; RV64I-NEXT: [[LOAD:%[0-9]+]]:vrb(<vscale x 2 x s8>) = G_LOAD [[COPY1]](p0) :: (load (<vscale x 2 x s8>) from %ir.pa)
+    ; RV64I-NEXT: $v8 = COPY [[LOAD]](<vscale x 2 x s8>)
+    ; RV64I-NEXT: PseudoRET implicit $v8
+    %0:gprb(p0) = COPY $x10
+    %2:vrb(p0) = COPY %0(p0)
----------------
jiahanxie353 wrote:

Makes sense, thanks @michaelmaitland and @topperc!

https://github.com/llvm/llvm-project/pull/99932
