[llvm] r341406 - NFC: expand memset inline arm64 coverage

JF Bastien via llvm-commits llvm-commits at lists.llvm.org
Tue Sep 4 14:02:00 PDT 2018


Author: jfb
Date: Tue Sep  4 14:02:00 2018
New Revision: 341406

URL: http://llvm.org/viewvc/llvm-project?rev=341406&view=rev
Log:
NFC: expand memset inline arm64 coverage

I'm looking at some codegen optimizations in this area and want to make sure I understand the current codegen and don't regress it. This patch simply expands the two existing tests to capture more of the current code generation for small heap-based and stack-based memsets on arm64. The tested code is already pretty good, notably in its use of STP, FP stores, FP immediate generation, and folding one of the stores into a stack spill when possible. The uses of STUR could be improved, and some more pairing could occur. Straying from bzero patterns currently yields suboptimal code, and I expect a variety of small changes could make things way better.
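
For anyone wanting to reproduce one of these patterns locally, here is a
minimal sketch (not part of the patch): the IR mirrors the bzero_16_heap
case below, the invocation matches the test's RUN line, and the file and
function names are made up for illustration:

  $ cat zero16.ll
  declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)

  define void @zero16(i8* %p) {
    call void @llvm.memset.p0i8.i64(i8* align 8 %p, i8 0, i64 16, i1 false)
    ret void
  }

  $ llc < zero16.ll -mtriple=arm64-eabi
  ; the interesting part of the output is a single paired store of the
  ; zero register:
  ;   stp xzr, xzr, [x0]
  ;   ret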

Modified:
    llvm/trunk/test/CodeGen/AArch64/arm64-memset-inline.ll

Modified: llvm/trunk/test/CodeGen/AArch64/arm64-memset-inline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/arm64-memset-inline.ll?rev=341406&r1=341405&r2=341406&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/arm64-memset-inline.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/arm64-memset-inline.ll Tue Sep  4 14:02:00 2018
@@ -1,27 +1,224 @@
 ; RUN: llc < %s -mtriple=arm64-eabi | FileCheck %s
 
-define void @t1(i8* nocapture %c) nounwind optsize {
-entry:
-; CHECK-LABEL: t1:
-; CHECK: str wzr, [x0, #8]
-; CHECK: str xzr, [x0]
+define void @bzero_4_heap(i8* nocapture %c) {
+; CHECK-LABEL: bzero_4_heap:
+; CHECK:       str wzr, [x0]
+; CHECK-NEXT:  ret
+  call void @llvm.memset.p0i8.i64(i8* align 4 %c, i8 0, i64 4, i1 false)
+  ret void
+}
+
+define void @bzero_8_heap(i8* nocapture %c) {
+; CHECK-LABEL: bzero_8_heap:
+; CHECK:       str xzr, [x0]
+; CHECK-NEXT:  ret
+  call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 8, i1 false)
+  ret void
+}
+
+define void @bzero_12_heap(i8* nocapture %c) {
+; CHECK-LABEL: bzero_12_heap:
+; CHECK:       str wzr, [x0, #8]
+; CHECK-NEXT:  str xzr, [x0]
+; CHECK-NEXT:  ret
   call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 12, i1 false)
   ret void
 }
 
-define void @t2() nounwind ssp {
-entry:
-; CHECK-LABEL: t2:
-; CHECK: stp xzr, xzr, [sp, #16]
-; CHECK: strh wzr, [sp, #32]
-; CHECK: str xzr, [sp, #8]
+define void @bzero_16_heap(i8* nocapture %c) {
+; CHECK-LABEL: bzero_16_heap:
+; CHECK:       stp xzr, xzr, [x0]
+; CHECK-NEXT:  ret
+  call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 16, i1 false)
+  ret void
+}
+
+define void @bzero_32_heap(i8* nocapture %c) {
+; CHECK-LABEL: bzero_32_heap:
+; CHECK:       movi v0.2d, #0000000000000000
+; CHECK-NEXT:  stp q0, q0, [x0]
+; CHECK-NEXT:  ret
+  call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 32, i1 false)
+  ret void
+}
+
+define void @bzero_64_heap(i8* nocapture %c) {
+; CHECK-LABEL: bzero_64_heap:
+; CHECK:       movi v0.2d, #0000000000000000
+; CHECK-NEXT:  stp q0, q0, [x0, #32]
+; CHECK-NEXT:  stp q0, q0, [x0]
+; CHECK-NEXT:  ret
+  call void @llvm.memset.p0i8.i64(i8* align 8 %c, i8 0, i64 64, i1 false)
+  ret void
+}
+
+define void @bzero_4_stack() {
+; CHECK-LABEL: bzero_4_stack:
+; CHECK:       str wzr, [sp, #12]
+; CHECK-NEXT:  bl something
+  %buf = alloca [4 x i8], align 1
+  %cast = bitcast [4 x i8]* %buf to i8*
+  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 4, i1 false)
+  call void @something(i8* %cast)
+  ret void
+}
+
+define void @bzero_8_stack() {
+; CHECK-LABEL: bzero_8_stack:
+; CHECK:       str xzr, [sp, #8]
+; CHECK-NEXT:  bl something
+  %buf = alloca [8 x i8], align 1
+  %cast = bitcast [8 x i8]* %buf to i8*
+  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 8, i1 false)
+  call void @something(i8* %cast)
+  ret void
+}
+
+define void @bzero_12_stack() {
+; CHECK-LABEL: bzero_12_stack:
+; CHECK:       str wzr, [sp, #8]
+; CHECK-NEXT:  str xzr, [sp]
+; CHECK-NEXT:  bl something
+  %buf = alloca [12 x i8], align 1
+  %cast = bitcast [12 x i8]* %buf to i8*
+  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 12, i1 false)
+  call void @something(i8* %cast)
+  ret void
+}
+
+define void @bzero_16_stack() {
+; CHECK-LABEL: bzero_16_stack:
+; CHECK:       stp xzr, x30, [sp, #8]
+; CHECK:       mov x0, sp
+; CHECK:       str xzr, [sp]
+; CHECK-NEXT:  bl something
+  %buf = alloca [16 x i8], align 1
+  %cast = bitcast [16 x i8]* %buf to i8*
+  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 16, i1 false)
+  call void @something(i8* %cast)
+  ret void
+}
+
+define void @bzero_20_stack() {
+; CHECK-LABEL: bzero_20_stack:
+; CHECK:       stp xzr, xzr, [sp, #8]
+; CHECK-NEXT:  str wzr, [sp, #24]
+; CHECK-NEXT:  bl something
+  %buf = alloca [20 x i8], align 1
+  %cast = bitcast [20 x i8]* %buf to i8*
+  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 20, i1 false)
+  call void @something(i8* %cast)
+  ret void
+}
+
+define void @bzero_26_stack() {
+; CHECK-LABEL: bzero_26_stack:
+; CHECK:       stp xzr, xzr, [sp]
+; CHECK-NEXT:  strh wzr, [sp, #24]
+; CHECK-NEXT:  str xzr, [sp, #16]
+; CHECK-NEXT:  bl something
   %buf = alloca [26 x i8], align 1
-  %0 = getelementptr inbounds [26 x i8], [26 x i8]* %buf, i32 0, i32 0
-  call void @llvm.memset.p0i8.i32(i8* %0, i8 0, i32 26, i1 false)
-  call void @something(i8* %0) nounwind
+  %cast = bitcast [26 x i8]* %buf to i8*
+  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 26, i1 false)
+  call void @something(i8* %cast)
+  ret void
+}
+
+define void @bzero_32_stack() {
+; CHECK-LABEL: bzero_32_stack:
+; CHECK:       movi v0.2d, #0000000000000000
+; CHECK-NEXT:  mov x0, sp
+; CHECK-NEXT:  stp q0, q0, [sp]
+; CHECK-NEXT:  bl something
+  %buf = alloca [32 x i8], align 1
+  %cast = bitcast [32 x i8]* %buf to i8*
+  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 32, i1 false)
+  call void @something(i8* %cast)
+  ret void
+}
+
+define void @bzero_40_stack() {
+; CHECK-LABEL: bzero_40_stack:
+; CHECK:       stp xzr, x30, [sp, #40]
+; CHECK:       movi v0.2d, #0000000000000000
+; CHECK-NEXT:  add x0, sp, #8
+; CHECK-NEXT:  stur q0, [sp, #24]
+; CHECK-NEXT:  stur q0, [sp, #8]
+; CHECK-NEXT:  bl something
+  %buf = alloca [40 x i8], align 1
+  %cast = bitcast [40 x i8]* %buf to i8*
+  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 40, i1 false)
+  call void @something(i8* %cast)
+  ret void
+}
+
+define void @bzero_64_stack() {
+; CHECK-LABEL: bzero_64_stack:
+; CHECK:       movi v0.2d, #0000000000000000
+; CHECK-NEXT:  mov x0, sp
+; CHECK-NEXT:  stp q0, q0, [sp, #32]
+; CHECK-NEXT:  stp q0, q0, [sp]
+; CHECK-NEXT:  bl something
+  %buf = alloca [64 x i8], align 1
+  %cast = bitcast [64 x i8]* %buf to i8*
+  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 64, i1 false)
+  call void @something(i8* %cast)
+  ret void
+}
+
+define void @bzero_72_stack() {
+; CHECK-LABEL: bzero_72_stack:
+; CHECK:       stp xzr, x30, [sp, #72]
+; CHECK:       movi v0.2d, #0000000000000000
+; CHECK-NEXT:  add x0, sp, #8
+; CHECK-NEXT:  stur q0, [sp, #56]
+; CHECK-NEXT:  stur q0, [sp, #40]
+; CHECK-NEXT:  stur q0, [sp, #24]
+; CHECK-NEXT:  stur q0, [sp, #8]
+; CHECK-NEXT:  bl something
+  %buf = alloca [72 x i8], align 1
+  %cast = bitcast [72 x i8]* %buf to i8*
+  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 72, i1 false)
+  call void @something(i8* %cast)
+  ret void
+}
+
+define void @bzero_128_stack() {
+; CHECK-LABEL: bzero_128_stack:
+; CHECK:       movi v0.2d, #0000000000000000
+; CHECK-NEXT:  mov x0, sp
+; CHECK-NEXT:  stp q0, q0, [sp, #96]
+; CHECK-NEXT:  stp q0, q0, [sp, #64]
+; CHECK-NEXT:  stp q0, q0, [sp, #32]
+; CHECK-NEXT:  stp q0, q0, [sp]
+; CHECK-NEXT:  bl something
+  %buf = alloca [128 x i8], align 1
+  %cast = bitcast [128 x i8]* %buf to i8*
+  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 128, i1 false)
+  call void @something(i8* %cast)
+  ret void
+}
+
+define void @bzero_256_stack() {
+; CHECK-LABEL: bzero_256_stack:
+; CHECK:       movi v0.2d, #0000000000000000
+; CHECK-NEXT:  mov x0, sp
+; CHECK-NEXT:  stp q0, q0, [sp, #224]
+; CHECK-NEXT:  stp q0, q0, [sp, #192]
+; CHECK-NEXT:  stp q0, q0, [sp, #160]
+; CHECK-NEXT:  stp q0, q0, [sp, #128]
+; CHECK-NEXT:  stp q0, q0, [sp, #96]
+; CHECK-NEXT:  stp q0, q0, [sp, #64]
+; CHECK-NEXT:  stp q0, q0, [sp, #32]
+; CHECK-NEXT:  stp q0, q0, [sp]
+; CHECK-NEXT:  bl something
+  %buf = alloca [256 x i8], align 1
+  %cast = bitcast [256 x i8]* %buf to i8*
+  call void @llvm.memset.p0i8.i32(i8* %cast, i8 0, i32 256, i1 false)
+  call void @something(i8* %cast)
   ret void
 }
 
-declare void @something(i8*) nounwind
+declare void @something(i8*)
 declare void @llvm.memset.p0i8.i32(i8* nocapture, i8, i32, i1) nounwind
 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1) nounwind
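
A note on two of the patterns in the log message. The "folding one of the
stores into a stack spill" refers to lines like "stp xzr, x30, [sp, #8]" in
bzero_16_stack, where one zeroing store is paired with the x30 (link
register) spill in a single instruction. On the STUR point: the q-register
form of STP only encodes offsets that are multiples of 16, so the
8-byte-offset stores in bzero_40_stack and bzero_72_stack fall back to
unscaled STUR. A sketch of the direction an improvement could take
(hypothetical codegen, assuming the buffer were placed at a 16-byte-aligned
offset, as it is in bzero_64_stack):

  ; today (bzero_72_stack, buffer at an 8-byte offset from sp):
  stur q0, [sp, #56]
  stur q0, [sp, #40]
  stur q0, [sp, #24]
  stur q0, [sp, #8]

  ; with a 16-byte-aligned slot, the same 64 bytes could pair up:
  stp q0, q0, [sp, #32]
  stp q0, q0, [sp]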



