[llvm] d0ad59d - [RISCV] Add codegen test coverage for strided load/store intrinsics

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Thu Sep 22 15:02:34 PDT 2022


Author: Philip Reames
Date: 2022-09-22T15:02:24-07:00
New Revision: d0ad59d63bbd0f35b9dabbe8b1239b724f44ab4e

URL: https://github.com/llvm/llvm-project/commit/d0ad59d63bbd0f35b9dabbe8b1239b724f44ab4e
DIFF: https://github.com/llvm/llvm-project/commit/d0ad59d63bbd0f35b9dabbe8b1239b724f44ab4e.diff

LOG: [RISCV] Add codegen test coverage for strided load/store intrinsics

Added: 
    llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll

Modified: 
    

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll b/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll
new file mode 100644
index 000000000000..731435c340bb
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll
@@ -0,0 +1,112 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=riscv64 -mattr=+m,+v | FileCheck %s
+
+declare <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8>, ptr, i64, <32 x i1>)
+declare <2 x i64> @llvm.riscv.masked.strided.load.v2i64.p0.i64(<2 x i64>, ptr, i64, <2 x i1>)
+
+declare void @llvm.riscv.masked.strided.store.v32i8.p0.i64(<32 x i8>, ptr, i64, <32 x i1>)
+declare void @llvm.riscv.masked.strided.store.v2i64.p0.i64(<2 x i64>, ptr, i64, <2 x i1>)
+
+define <32 x i8> @strided_load_i8(ptr %p, i64 %stride, <32 x i1> %m) {
+; CHECK-LABEL: strided_load_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 32
+; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT:    vlse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+  %res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 %stride, <32 x i1> %m)
+  ret <32 x i8> %res
+}
+
+define <2 x i64> @strided_load_i64(ptr %p, i64 %stride, <2 x i1> %m) {
+; CHECK-LABEL: strided_load_i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT:    vlse64.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+  %res = call <2 x i64> @llvm.riscv.masked.strided.load.v2i64.p0.i64(<2 x i64> undef, ptr %p, i64 %stride, <2 x i1> %m)
+  ret <2 x i64> %res
+}
+
+define <32 x i8> @strided_load_i8_splat(ptr %p, <32 x i1> %m) {
+; CHECK-LABEL: strided_load_i8_splat:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vlse8.v v8, (a0), zero, v0.t
+; CHECK-NEXT:    ret
+  %res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 0, <32 x i1> %m)
+  ret <32 x i8> %res
+}
+
+define <32 x i8> @strided_load_i8_reverse(ptr %p, <32 x i1> %m) {
+; CHECK-LABEL: strided_load_i8_reverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    li a2, -1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vlse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 -1, <32 x i1> %m)
+  ret <32 x i8> %res
+}
+
+define <32 x i8> @strided_load_i8_nostride(ptr %p, <32 x i1> %m) {
+; CHECK-LABEL: strided_load_i8_nostride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    li a2, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vlse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  %res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 1, <32 x i1> %m)
+  ret <32 x i8> %res
+}
+
+
+define void @strided_store_i8(ptr %p, <32 x i8> %v, i64 %stride, <32 x i1> %m) {
+; CHECK-LABEL: strided_store_i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a2, 32
+; CHECK-NEXT:    vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT:    vsse8.v v8, (a0), a1, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.riscv.masked.strided.store.v32i8.p0.i64(<32 x i8> %v, ptr %p, i64 %stride, <32 x i1> %m)
+  ret void
+}
+
+define void @strided_store_i8_zero(ptr %p, <32 x i8> %v, <32 x i1> %m) {
+; CHECK-LABEL: strided_store_i8_zero:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vsse8.v v8, (a0), zero, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.riscv.masked.strided.store.v32i8.p0.i64(<32 x i8> %v, ptr %p, i64 0, <32 x i1> %m)
+  ret void
+}
+
+define void @strided_store_i8_nostride(ptr %p, <32 x i8> %v, <32 x i1> %m) {
+; CHECK-LABEL: strided_store_i8_nostride:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    li a2, 1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vsse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.riscv.masked.strided.store.v32i8.p0.i64(<32 x i8> %v, ptr %p, i64 1, <32 x i1> %m)
+  ret void
+}
+
+define void @strided_store_i8_reverse(ptr %p, <32 x i8> %v, <32 x i1> %m) {
+; CHECK-LABEL: strided_store_i8_reverse:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    li a1, 32
+; CHECK-NEXT:    li a2, -1
+; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT:    vsse8.v v8, (a0), a2, v0.t
+; CHECK-NEXT:    ret
+  call void @llvm.riscv.masked.strided.store.v32i8.p0.i64(<32 x i8> %v, ptr %p, i64 -1, <32 x i1> %m)
+  ret void
+}
+
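For reference, a typical way to run this test and regenerate its CHECK lines (a sketch, assuming an LLVM build tree at build/ with llc and llvm-lit built; the paths are illustrative and not taken from the commit):

    # Run just this test through lit
    build/bin/llvm-lit -v llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll

    # Regenerate the autogenerated assertions after changing the IR or the backend
    llvm/utils/update_llc_test_checks.py --llc-binary=build/bin/llc \
        llvm/test/CodeGen/RISCV/rvv/strided-load-store-intrinsics.ll

The second command rewrites the CHECK lines referenced by the NOTE at the top of the test file.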

More information about the llvm-commits mailing list