[llvm] 4f057f5 - [RISCV] Expand memset.inline test coverage [nfc]

Philip Reames via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 20 17:37:45 PDT 2023


Author: Philip Reames
Date: 2023-07-20T17:37:36-07:00
New Revision: 4f057f5296723c6a11ea21b43a5f86fc9dde7c93

URL: https://github.com/llvm/llvm-project/commit/4f057f5296723c6a11ea21b43a5f86fc9dde7c93
DIFF: https://github.com/llvm/llvm-project/commit/4f057f5296723c6a11ea21b43a5f86fc9dde7c93.diff

LOG: [RISCV] Expand memset.inline test coverage [nfc]

Add coverage for unaligned overlap cases and for vector stores.

Note that the vector memset here comes from store combining, not from memset lowering.
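
For context, @llvm.memset.inline behaves like @llvm.memset except that the
generated code is guaranteed not to call any external functions, so the
backend must expand it inline with stores. A minimal sketch of the shape
these tests take (the function name @zero15 is illustrative; the actual RUN
lines live in the test files themselves):

    declare void @llvm.memset.inline.p0.i64(ptr, i8, i64, i1)

    ; Zero 15 bytes. Since a libcall to memset() is not allowed, the
    ; backend expands this into the widest legal scalar stores; on RVV
    ; targets those stores may later be merged into vector stores by
    ; store combining, which is what the rvv/ test below exercises.
    define void @zero15(ptr %a) nounwind {
      tail call void @llvm.memset.inline.p0.i64(ptr align 8 %a, i8 0, i64 15, i1 0)
      ret void
    }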

Added: 
    

Modified: 
    llvm/test/CodeGen/RISCV/memset-inline.ll
    llvm/test/CodeGen/RISCV/rvv/memset-inline.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/RISCV/memset-inline.ll b/llvm/test/CodeGen/RISCV/memset-inline.ll
index 777a90fc87fc44..92880d51e2662b 100644
--- a/llvm/test/CodeGen/RISCV/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/memset-inline.ll
@@ -1242,3 +1242,46 @@ define void @aligned_bzero_64(ptr %a) nounwind {
   tail call void @llvm.memset.inline.p0.i64(ptr align 64 %a, i8 0, i64 64, i1 0)
   ret void
 }
+
+
+; /////////////////////////////////////////////////////////////////////////////
+; Usual overlap tricks
+
+define void @aligned_bzero_7(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_bzero_7:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    sb zero, 6(a0)
+; RV32-BOTH-NEXT:    sh zero, 4(a0)
+; RV32-BOTH-NEXT:    sw zero, 0(a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_bzero_7:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    sb zero, 6(a0)
+; RV64-BOTH-NEXT:    sh zero, 4(a0)
+; RV64-BOTH-NEXT:    sw zero, 0(a0)
+; RV64-BOTH-NEXT:    ret
+  tail call void @llvm.memset.inline.p0.i64(ptr align 8 %a, i8 0, i64 7, i1 0)
+  ret void
+}
+
+define void @aligned_bzero_15(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_bzero_15:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    sb zero, 14(a0)
+; RV32-BOTH-NEXT:    sh zero, 12(a0)
+; RV32-BOTH-NEXT:    sw zero, 8(a0)
+; RV32-BOTH-NEXT:    sw zero, 4(a0)
+; RV32-BOTH-NEXT:    sw zero, 0(a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_bzero_15:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    sb zero, 14(a0)
+; RV64-BOTH-NEXT:    sh zero, 12(a0)
+; RV64-BOTH-NEXT:    sw zero, 8(a0)
+; RV64-BOTH-NEXT:    sd zero, 0(a0)
+; RV64-BOTH-NEXT:    ret
+  tail call void @llvm.memset.inline.p0.i64(ptr align 8 %a, i8 0, i64 15, i1 0)
+  ret void
+}

diff --git a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
index 170b72eb0e832b..eec67952598811 100644
--- a/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/memset-inline.ll
@@ -1217,3 +1217,91 @@ define void @aligned_bzero_64(ptr %a) nounwind {
   tail call void @llvm.memset.inline.p0.i64(ptr align 64 %a, i8 0, i64 64, i1 0)
   ret void
 }
+
+define void @aligned_bzero_66(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_bzero_66:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    sh zero, 64(a0)
+; RV32-BOTH-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-BOTH-NEXT:    vmv.v.i v8, 0
+; RV32-BOTH-NEXT:    vse32.v v8, (a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_bzero_66:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    sh zero, 64(a0)
+; RV64-BOTH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-BOTH-NEXT:    vmv.v.i v8, 0
+; RV64-BOTH-NEXT:    vse64.v v8, (a0)
+; RV64-BOTH-NEXT:    ret
+  tail call void @llvm.memset.inline.p0.i64(ptr align 64 %a, i8 0, i64 66, i1 0)
+  ret void
+}
+
+define void @aligned_bzero_96(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_bzero_96:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    addi a1, a0, 64
+; RV32-BOTH-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
+; RV32-BOTH-NEXT:    vmv.v.i v8, 0
+; RV32-BOTH-NEXT:    vse32.v v8, (a1)
+; RV32-BOTH-NEXT:    vsetivli zero, 16, e32, m4, ta, ma
+; RV32-BOTH-NEXT:    vmv.v.i v8, 0
+; RV32-BOTH-NEXT:    vse32.v v8, (a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_bzero_96:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    addi a1, a0, 64
+; RV64-BOTH-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-BOTH-NEXT:    vmv.v.i v8, 0
+; RV64-BOTH-NEXT:    vse64.v v8, (a1)
+; RV64-BOTH-NEXT:    vsetivli zero, 8, e64, m4, ta, ma
+; RV64-BOTH-NEXT:    vmv.v.i v8, 0
+; RV64-BOTH-NEXT:    vse64.v v8, (a0)
+; RV64-BOTH-NEXT:    ret
+  tail call void @llvm.memset.inline.p0.i64(ptr align 64 %a, i8 0, i64 96, i1 0)
+  ret void
+}
+
+define void @aligned_bzero_128(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_bzero_128:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    li a1, 32
+; RV32-BOTH-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-BOTH-NEXT:    vmv.v.i v8, 0
+; RV32-BOTH-NEXT:    vse32.v v8, (a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_bzero_128:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-BOTH-NEXT:    vmv.v.i v8, 0
+; RV64-BOTH-NEXT:    vse64.v v8, (a0)
+; RV64-BOTH-NEXT:    ret
+  tail call void @llvm.memset.inline.p0.i64(ptr align 64 %a, i8 0, i64 128, i1 0)
+  ret void
+}
+
+define void @aligned_bzero_256(ptr %a) nounwind {
+; RV32-BOTH-LABEL: aligned_bzero_256:
+; RV32-BOTH:       # %bb.0:
+; RV32-BOTH-NEXT:    li a1, 32
+; RV32-BOTH-NEXT:    vsetvli zero, a1, e32, m8, ta, ma
+; RV32-BOTH-NEXT:    vmv.v.i v8, 0
+; RV32-BOTH-NEXT:    addi a1, a0, 128
+; RV32-BOTH-NEXT:    vse32.v v8, (a1)
+; RV32-BOTH-NEXT:    vse32.v v8, (a0)
+; RV32-BOTH-NEXT:    ret
+;
+; RV64-BOTH-LABEL: aligned_bzero_256:
+; RV64-BOTH:       # %bb.0:
+; RV64-BOTH-NEXT:    addi a1, a0, 128
+; RV64-BOTH-NEXT:    vsetivli zero, 16, e64, m8, ta, ma
+; RV64-BOTH-NEXT:    vmv.v.i v8, 0
+; RV64-BOTH-NEXT:    vse64.v v8, (a1)
+; RV64-BOTH-NEXT:    vse64.v v8, (a0)
+; RV64-BOTH-NEXT:    ret
+  tail call void @llvm.memset.inline.p0.i64(ptr align 64 %a, i8 0, i64 256, i1 0)
+  ret void
+}
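
The CHECK lines in these tests follow the utils/update_llc_test_checks.py
format, so after a lowering change they can be regenerated with something
like (paths relative to the source tree):

    llvm/utils/update_llc_test_checks.py \
        llvm/test/CodeGen/RISCV/memset-inline.ll \
        llvm/test/CodeGen/RISCV/rvv/memset-inline.ll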
