[llvm] 18013be - [RISCV] Add tests for unaligned segmented loads and stores

Luke Lau via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 7 07:34:30 PDT 2023


Author: Luke Lau
Date: 2023-07-07T15:34:22+01:00
New Revision: 18013bea468849d8be1b6a541a0cff0502877286

URL: https://github.com/llvm/llvm-project/commit/18013bea468849d8be1b6a541a0cff0502877286
DIFF: https://github.com/llvm/llvm-project/commit/18013bea468849d8be1b6a541a0cff0502877286.diff

LOG: [RISCV] Add tests for unaligned segmented loads and stores

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D154535

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
    llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll
    llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
    llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
index e13604ee6b7f34..d9a13afbce6caa 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-deinterleave-load.ll
@@ -82,6 +82,18 @@ define {<16 x i8>, <16 x i8>} @vector_deinterleave_load_v16i8_v32i8(ptr %p) {
   ret {<16 x i8>, <16 x i8>} %retval
 }
 
+; FIXME: Shouldn't be lowered to vlseg because it's unaligned
+define {<8 x i16>, <8 x i16>} @vector_deinterleave_load_v8i16_v16i16_align1(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_v8i16_v16i16_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vlseg2e16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %vec = load <16 x i16>, ptr %p, align 1
+  %retval = call {<8 x i16>, <8 x i16>} @llvm.experimental.vector.deinterleave2.v16i16(<16 x i16> %vec)
+  ret {<8 x i16>, <8 x i16>} %retval
+}
+
 define {<8 x i16>, <8 x i16>} @vector_deinterleave_load_v8i16_v16i16(ptr %p) {
 ; CHECK-LABEL: vector_deinterleave_load_v8i16_v16i16:
 ; CHECK:       # %bb.0:

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll
index e2ff4ecd18403f..25c7e851f422ba 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-interleave-store.ll
@@ -29,6 +29,18 @@ define void @vector_interleave_store_v32i1_v16i1(<16 x i1> %a, <16 x i1> %b, ptr
   ret void
 }
 
+; FIXME: Shouldn't be lowered to vsseg because it's unaligned
+define void @vector_interleave_store_v16i16_v8i16_align1(<8 x i16> %a, <8 x i16> %b, ptr %p) {
+; CHECK-LABEL: vector_interleave_store_v16i16_v8i16_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <16 x i16> @llvm.experimental.vector.interleave2.v16i16(<8 x i16> %a, <8 x i16> %b)
+  store <16 x i16> %res, ptr %p, align 1
+  ret void
+}
+
 define void @vector_interleave_store_v16i16_v8i16(<8 x i16> %a, <8 x i16> %b, ptr %p) {
 ; CHECK-LABEL: vector_interleave_store_v16i16_v8i16:
 ; CHECK:       # %bb.0:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
index 5536a6ebc1510d..326fd78c3cc3e2 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-deinterleave-load.ll
@@ -39,6 +39,18 @@ define {<vscale x 16 x i8>, <vscale x 16 x i8>} @vector_deinterleave_load_nxv16i
   ret {<vscale x 16 x i8>, <vscale x 16 x i8>} %retval
 }
 
+; FIXME: Shouldn't be lowered to vlseg because it's unaligned
+define {<vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_load_nxv8i16_nxv16i16_align1(ptr %p) {
+; CHECK-LABEL: vector_deinterleave_load_nxv8i16_nxv16i16_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vlseg2e16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %vec = load <vscale x 16 x i16>, ptr %p, align 1
+  %retval = call {<vscale x 8 x i16>, <vscale x 8 x i16>} @llvm.experimental.vector.deinterleave2.nxv16i16(<vscale x 16 x i16> %vec)
+  ret {<vscale x 8 x i16>, <vscale x 8 x i16>} %retval
+}
+
 define {<vscale x 8 x i16>, <vscale x 8 x i16>} @vector_deinterleave_load_nxv8i16_nxv16i16(ptr %p) {
 ; CHECK-LABEL: vector_deinterleave_load_nxv8i16_nxv16i16:
 ; CHECK:       # %bb.0:

diff --git a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
index 50dd979bc79912..909dc3461c5ca9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vector-interleave-store.ll
@@ -32,6 +32,18 @@ define void @vector_interleave_store_nxv32i1_nxv16i1(<vscale x 16 x i1> %a, <vsc
   ret void
 }
 
+; FIXME: Shouldn't be lowered to vsseg because it's unaligned
+define void @vector_interleave_store_nxv16i16_nxv8i16_align1(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, ptr %p) {
+; CHECK-LABEL: vector_interleave_store_nxv16i16_nxv8i16_align1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, ma
+; CHECK-NEXT:    vsseg2e16.v v8, (a0)
+; CHECK-NEXT:    ret
+  %res = call <vscale x 16 x i16> @llvm.experimental.vector.interleave2.nxv16i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b)
+  store <vscale x 16 x i16> %res, ptr %p, align 1
+  ret void
+}
+
 define void @vector_interleave_store_nxv16i16_nxv8i16(<vscale x 8 x i16> %a, <vscale x 8 x i16> %b, ptr %p) {
 ; CHECK-LABEL: vector_interleave_store_nxv16i16_nxv8i16:
 ; CHECK:       # %bb.0:


        


More information about the llvm-commits mailing list