[llvm] d84b4e1 - [AArch64][x86] add tests for rotated store merge; NFC

Sanjay Patel via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 10 08:29:03 PDT 2020


Author: Sanjay Patel
Date: 2020-07-10T11:28:51-04:00
New Revision: d84b4e163da7fba26594f960fca10fa31f7c611a

URL: https://github.com/llvm/llvm-project/commit/d84b4e163da7fba26594f960fca10fa31f7c611a
DIFF: https://github.com/llvm/llvm-project/commit/d84b4e163da7fba26594f960fca10fa31f7c611a.diff

LOG: [AArch64][x86] add tests for rotated store merge; NFC

Added: 
    

Modified: 
    llvm/test/CodeGen/AArch64/merge-store-dependency.ll
    llvm/test/CodeGen/X86/stores-merging.ll

Removed: 
    


################################################################################
diff --git a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
index 5613db1e5214..77b7012d2ed1 100644
--- a/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
+++ b/llvm/test/CodeGen/AArch64/merge-store-dependency.ll
@@ -50,7 +50,6 @@ define void @test(%struct1* %fde, i32 %fd, void (i32, i32, i8*)* %func, i8* %arg
 ; A53-NEXT:    // =>This Inner Loop Header: Depth=1
 ; A53-NEXT:    b .LBB0_4
 entry:
-
   %0 = bitcast %struct1* %fde to i8*
   tail call void @llvm.memset.p0i8.i64(i8* align 8 %0, i8 0, i64 40, i1 false)
   %state = getelementptr inbounds %struct1, %struct1* %fde, i64 0, i32 4
@@ -96,6 +95,110 @@ exit:
   ret void
 }
 
+define void @rotate16_in_place(i8* %p) {
+; A53-LABEL: rotate16_in_place:
+; A53:       // %bb.0:
+; A53-NEXT:    ldrb w8, [x0, #1]
+; A53-NEXT:    ldrb w9, [x0]
+; A53-NEXT:    strb w8, [x0]
+; A53-NEXT:    strb w9, [x0, #1]
+; A53-NEXT:    ret
+  %p0 = getelementptr i8, i8* %p, i64 0
+  %p1 = getelementptr i8, i8* %p, i64 1
+  %i0 = load i8, i8* %p0, align 1
+  %i1 = load i8, i8* %p1, align 1
+  store i8 %i1, i8* %p0, align 1
+  store i8 %i0, i8* %p1, align 1
+  ret void
+}
+
+define void @rotate16(i8* %p, i8* %q) {
+; A53-LABEL: rotate16:
+; A53:       // %bb.0:
+; A53-NEXT:    ldrb w8, [x0, #1]
+; A53-NEXT:    ldrb w9, [x0]
+; A53-NEXT:    strb w8, [x1]
+; A53-NEXT:    strb w9, [x1, #1]
+; A53-NEXT:    ret
+  %p0 = getelementptr i8, i8* %p, i64 0
+  %p1 = getelementptr i8, i8* %p, i64 1
+  %q0 = getelementptr i8, i8* %q, i64 0
+  %q1 = getelementptr i8, i8* %q, i64 1
+  %i0 = load i8, i8* %p0, align 1
+  %i1 = load i8, i8* %p1, align 1
+  store i8 %i1, i8* %q0, align 1
+  store i8 %i0, i8* %q1, align 1
+  ret void
+}
+
+define void @rotate32_in_place(i16* %p) {
+; A53-LABEL: rotate32_in_place:
+; A53:       // %bb.0:
+; A53-NEXT:    ldrh w8, [x0, #2]
+; A53-NEXT:    ldrh w9, [x0]
+; A53-NEXT:    strh w8, [x0]
+; A53-NEXT:    strh w9, [x0, #2]
+; A53-NEXT:    ret
+  %p0 = getelementptr i16, i16* %p, i64 0
+  %p1 = getelementptr i16, i16* %p, i64 1
+  %i0 = load i16, i16* %p0, align 2
+  %i1 = load i16, i16* %p1, align 2
+  store i16 %i1, i16* %p0, align 2
+  store i16 %i0, i16* %p1, align 2
+  ret void
+}
+
+define void @rotate32(i16* %p) {
+; A53-LABEL: rotate32:
+; A53:       // %bb.0:
+; A53-NEXT:    ldrh w8, [x0, #2]
+; A53-NEXT:    ldrh w9, [x0]
+; A53-NEXT:    strh w8, [x0, #84]
+; A53-NEXT:    strh w9, [x0, #86]
+; A53-NEXT:    ret
+  %p0 = getelementptr i16, i16* %p, i64 0
+  %p1 = getelementptr i16, i16* %p, i64 1
+  %p42 = getelementptr i16, i16* %p, i64 42
+  %p43 = getelementptr i16, i16* %p, i64 43
+  %i0 = load i16, i16* %p0, align 2
+  %i1 = load i16, i16* %p1, align 2
+  store i16 %i1, i16* %p42, align 2
+  store i16 %i0, i16* %p43, align 2
+  ret void
+}
+
+define void @rotate64_in_place(i32* %p) {
+; A53-LABEL: rotate64_in_place:
+; A53:       // %bb.0:
+; A53-NEXT:    ldp w9, w8, [x0]
+; A53-NEXT:    stp w8, w9, [x0]
+; A53-NEXT:    ret
+  %p0 = getelementptr i32, i32* %p, i64 0
+  %p1 = getelementptr i32, i32* %p, i64 1
+  %i0 = load i32, i32* %p0, align 4
+  %i1 = load i32, i32* %p1, align 4
+  store i32 %i1, i32* %p0, align 4
+  store i32 %i0, i32* %p1, align 4
+  ret void
+}
+
+define void @rotate64(i32* %p) {
+; A53-LABEL: rotate64:
+; A53:       // %bb.0:
+; A53-NEXT:    ldp w9, w8, [x0]
+; A53-NEXT:    stp w8, w9, [x0, #8]
+; A53-NEXT:    ret
+  %p0 = getelementptr i32, i32* %p, i64 0
+  %p1 = getelementptr i32, i32* %p, i64 1
+  %p2 = getelementptr i32, i32* %p, i64 2
+  %p3 = getelementptr i32, i32* %p, i64 3
+  %i0 = load i32, i32* %p0, align 4
+  %i1 = load i32, i32* %p1, align 4
+  store i32 %i1, i32* %p2, align 4
+  store i32 %i0, i32* %p3, align 4
+  ret void
+}
+
 declare void @llvm.memset.p0i8.i64(i8* nocapture, i8, i64, i1)
 declare i32 @fcntl(i32, i32, ...)
 declare noalias i8* @foo()

diff --git a/llvm/test/CodeGen/X86/stores-merging.ll b/llvm/test/CodeGen/X86/stores-merging.ll
index 6420ac7dc3ed..768684067f32 100644
--- a/llvm/test/CodeGen/X86/stores-merging.ll
+++ b/llvm/test/CodeGen/X86/stores-merging.ll
@@ -242,3 +242,200 @@ define void @pr43446_1(i8* %a) {
   store i1 true, i1* %b, align 1
   ret void
 }
+
+define void @rotate16_in_place(i8* %p) {
+; CHECK-LABEL: rotate16_in_place:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movb (%rdi), %al
+; CHECK-NEXT:    movb 1(%rdi), %cl
+; CHECK-NEXT:    movb %cl, (%rdi)
+; CHECK-NEXT:    movb %al, 1(%rdi)
+; CHECK-NEXT:    retq
+  %p0 = getelementptr i8, i8* %p, i64 0
+  %p1 = getelementptr i8, i8* %p, i64 1
+  %i0 = load i8, i8* %p0, align 1
+  %i1 = load i8, i8* %p1, align 1
+  store i8 %i1, i8* %p0, align 1
+  store i8 %i0, i8* %p1, align 1
+  ret void
+}
+
+define void @rotate16(i8* %p, i8* %q) {
+; CHECK-LABEL: rotate16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movb (%rdi), %al
+; CHECK-NEXT:    movb 1(%rdi), %cl
+; CHECK-NEXT:    movb %cl, (%rsi)
+; CHECK-NEXT:    movb %al, 1(%rsi)
+; CHECK-NEXT:    retq
+  %p0 = getelementptr i8, i8* %p, i64 0
+  %p1 = getelementptr i8, i8* %p, i64 1
+  %q0 = getelementptr i8, i8* %q, i64 0
+  %q1 = getelementptr i8, i8* %q, i64 1
+  %i0 = load i8, i8* %p0, align 1
+  %i1 = load i8, i8* %p1, align 1
+  store i8 %i1, i8* %q0, align 1
+  store i8 %i0, i8* %q1, align 1
+  ret void
+}
+
+define void @rotate32_in_place(i16* %p) {
+; CHECK-LABEL: rotate32_in_place:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movzwl (%rdi), %eax
+; CHECK-NEXT:    movzwl 2(%rdi), %ecx
+; CHECK-NEXT:    movw %cx, (%rdi)
+; CHECK-NEXT:    movw %ax, 2(%rdi)
+; CHECK-NEXT:    retq
+  %p0 = getelementptr i16, i16* %p, i64 0
+  %p1 = getelementptr i16, i16* %p, i64 1
+  %i0 = load i16, i16* %p0, align 2
+  %i1 = load i16, i16* %p1, align 2
+  store i16 %i1, i16* %p0, align 2
+  store i16 %i0, i16* %p1, align 2
+  ret void
+}
+
+define void @rotate32(i16* %p) {
+; CHECK-LABEL: rotate32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movzwl (%rdi), %eax
+; CHECK-NEXT:    movzwl 2(%rdi), %ecx
+; CHECK-NEXT:    movw %cx, 84(%rdi)
+; CHECK-NEXT:    movw %ax, 86(%rdi)
+; CHECK-NEXT:    retq
+  %p0 = getelementptr i16, i16* %p, i64 0
+  %p1 = getelementptr i16, i16* %p, i64 1
+  %p42 = getelementptr i16, i16* %p, i64 42
+  %p43 = getelementptr i16, i16* %p, i64 43
+  %i0 = load i16, i16* %p0, align 2
+  %i1 = load i16, i16* %p1, align 2
+  store i16 %i1, i16* %p42, align 2
+  store i16 %i0, i16* %p43, align 2
+  ret void
+}
+
+define void @rotate64_in_place(i32* %p) {
+; CHECK-LABEL: rotate64_in_place:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl (%rdi), %eax
+; CHECK-NEXT:    movl 4(%rdi), %ecx
+; CHECK-NEXT:    movl %ecx, (%rdi)
+; CHECK-NEXT:    movl %eax, 4(%rdi)
+; CHECK-NEXT:    retq
+  %p0 = getelementptr i32, i32* %p, i64 0
+  %p1 = getelementptr i32, i32* %p, i64 1
+  %i0 = load i32, i32* %p0, align 4
+  %i1 = load i32, i32* %p1, align 4
+  store i32 %i1, i32* %p0, align 4
+  store i32 %i0, i32* %p1, align 4
+  ret void
+}
+
+define void @rotate64(i32* %p) {
+; CHECK-LABEL: rotate64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl (%rdi), %eax
+; CHECK-NEXT:    movl 4(%rdi), %ecx
+; CHECK-NEXT:    movl %ecx, 8(%rdi)
+; CHECK-NEXT:    movl %eax, 12(%rdi)
+; CHECK-NEXT:    retq
+  %p0 = getelementptr i32, i32* %p, i64 0
+  %p1 = getelementptr i32, i32* %p, i64 1
+  %p2 = getelementptr i32, i32* %p, i64 2
+  %p3 = getelementptr i32, i32* %p, i64 3
+  %i0 = load i32, i32* %p0, align 4
+  %i1 = load i32, i32* %p1, align 4
+  store i32 %i1, i32* %p2, align 4
+  store i32 %i0, i32* %p3, align 4
+  ret void
+}
+
+define void @rotate64_iterate(i16* %p) {
+; CHECK-LABEL: rotate64_iterate:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movl (%rdi), %eax
+; CHECK-NEXT:    movl 4(%rdi), %ecx
+; CHECK-NEXT:    movl %ecx, 84(%rdi)
+; CHECK-NEXT:    movl %eax, 88(%rdi)
+; CHECK-NEXT:    retq
+  %p0 = getelementptr i16, i16* %p, i64 0
+  %p1 = getelementptr i16, i16* %p, i64 1
+  %p2 = getelementptr i16, i16* %p, i64 2
+  %p3 = getelementptr i16, i16* %p, i64 3
+  %p42 = getelementptr i16, i16* %p, i64 42
+  %p43 = getelementptr i16, i16* %p, i64 43
+  %p44 = getelementptr i16, i16* %p, i64 44
+  %p45 = getelementptr i16, i16* %p, i64 45
+  %i0 = load i16, i16* %p0, align 2
+  %i1 = load i16, i16* %p1, align 2
+  %i2 = load i16, i16* %p2, align 2
+  %i3 = load i16, i16* %p3, align 2
+  store i16 %i2, i16* %p42, align 2
+  store i16 %i3, i16* %p43, align 2
+  store i16 %i0, i16* %p44, align 2
+  store i16 %i1, i16* %p45, align 2
+  ret void
+}
+
+define void @rotate32_consecutive(i16* %p) {
+; CHECK-LABEL: rotate32_consecutive:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movzwl (%rdi), %eax
+; CHECK-NEXT:    movzwl 2(%rdi), %ecx
+; CHECK-NEXT:    movzwl 4(%rdi), %edx
+; CHECK-NEXT:    movzwl 6(%rdi), %esi
+; CHECK-NEXT:    movw %cx, 84(%rdi)
+; CHECK-NEXT:    movw %ax, 86(%rdi)
+; CHECK-NEXT:    movw %si, 88(%rdi)
+; CHECK-NEXT:    movw %dx, 90(%rdi)
+; CHECK-NEXT:    retq
+  %p0 = getelementptr i16, i16* %p, i64 0
+  %p1 = getelementptr i16, i16* %p, i64 1
+  %p2 = getelementptr i16, i16* %p, i64 2
+  %p3 = getelementptr i16, i16* %p, i64 3
+  %p42 = getelementptr i16, i16* %p, i64 42
+  %p43 = getelementptr i16, i16* %p, i64 43
+  %p44 = getelementptr i16, i16* %p, i64 44
+  %p45 = getelementptr i16, i16* %p, i64 45
+  %i0 = load i16, i16* %p0, align 2
+  %i1 = load i16, i16* %p1, align 2
+  %i2 = load i16, i16* %p2, align 2
+  %i3 = load i16, i16* %p3, align 2
+  store i16 %i1, i16* %p42, align 2
+  store i16 %i0, i16* %p43, align 2
+  store i16 %i3, i16* %p44, align 2
+  store i16 %i2, i16* %p45, align 2
+  ret void
+}
+
+define void @rotate32_twice(i16* %p) {
+; CHECK-LABEL: rotate32_twice:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    movzwl (%rdi), %eax
+; CHECK-NEXT:    movzwl 2(%rdi), %ecx
+; CHECK-NEXT:    movzwl 4(%rdi), %edx
+; CHECK-NEXT:    movzwl 6(%rdi), %esi
+; CHECK-NEXT:    movw %cx, 84(%rdi)
+; CHECK-NEXT:    movw %ax, 86(%rdi)
+; CHECK-NEXT:    movw %si, 108(%rdi)
+; CHECK-NEXT:    movw %dx, 110(%rdi)
+; CHECK-NEXT:    retq
+  %p0 = getelementptr i16, i16* %p, i64 0
+  %p1 = getelementptr i16, i16* %p, i64 1
+  %p2 = getelementptr i16, i16* %p, i64 2
+  %p3 = getelementptr i16, i16* %p, i64 3
+  %p42 = getelementptr i16, i16* %p, i64 42
+  %p43 = getelementptr i16, i16* %p, i64 43
+  %p54 = getelementptr i16, i16* %p, i64 54
+  %p55 = getelementptr i16, i16* %p, i64 55
+  %i0 = load i16, i16* %p0, align 2
+  %i1 = load i16, i16* %p1, align 2
+  %i2 = load i16, i16* %p2, align 2
+  %i3 = load i16, i16* %p3, align 2
+  store i16 %i1, i16* %p42, align 2
+  store i16 %i0, i16* %p43, align 2
+  store i16 %i3, i16* %p54, align 2
+  store i16 %i2, i16* %p55, align 2
+  ret void
+}


        


More information about the llvm-commits mailing list