[llvm-branch-commits] [llvm] b9b62c2 - [AArch64] Add a test for MachineLICM SinkIntoLoop. NFC.
Sjoerd Meijer via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Dec 22 04:28:12 PST 2020
Author: Sjoerd Meijer
Date: 2020-12-22T12:22:24Z
New Revision: b9b62c28677d2c812604e29bab27c1e2a2144e4b
URL: https://github.com/llvm/llvm-project/commit/b9b62c28677d2c812604e29bab27c1e2a2144e4b
DIFF: https://github.com/llvm/llvm-project/commit/b9b62c28677d2c812604e29bab27c1e2a2144e4b.diff
LOG: [AArch64] Add a test for MachineLICM SinkIntoLoop. NFC.
Added:
llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
Modified:
Removed:
################################################################################
diff --git a/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
new file mode 100644
index 000000000000..f8d53a574dd2
--- /dev/null
+++ b/llvm/test/CodeGen/AArch64/machine-licm-sink-instr.ll
@@ -0,0 +1,176 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc < %s -mtriple=aarch64 -sink-insts-to-avoid-spills | FileCheck %s
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+
+@A = external dso_local global [100 x i32], align 4
+
+define i32 @sink_load_and_copy(i32 %n) {
+; CHECK-LABEL: sink_load_and_copy:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w21, -24
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: mov w19, w0
+; CHECK-NEXT: cmp w0, #1 // =1
+; CHECK-NEXT: b.lt .LBB0_3
+; CHECK-NEXT: // %bb.1: // %for.body.preheader
+; CHECK-NEXT: adrp x8, A
+; CHECK-NEXT: ldr w21, [x8, :lo12:A]
+; CHECK-NEXT: mov w20, w19
+; CHECK-NEXT: .LBB0_2: // %for.body
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: mov w0, w21
+; CHECK-NEXT: bl _Z3usei
+; CHECK-NEXT: subs w19, w19, #1 // =1
+; CHECK-NEXT: sdiv w20, w20, w0
+; CHECK-NEXT: b.ne .LBB0_2
+; CHECK-NEXT: b .LBB0_4
+; CHECK-NEXT: .LBB0_3:
+; CHECK-NEXT: mov w20, w19
+; CHECK-NEXT: .LBB0_4: // %for.cond.cleanup
+; CHECK-NEXT: mov w0, w20
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %cmp63 = icmp sgt i32 %n, 0
+ br i1 %cmp63, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %0 = load i32, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @A, i64 0, i64 0), align 4
+ br label %for.body
+
+for.cond.cleanup:
+ %sum.0.lcssa = phi i32 [ %n, %entry ], [ %div, %for.body ]
+ ret i32 %sum.0.lcssa
+
+for.body:
+ %lsr.iv = phi i32 [ %n, %for.body.preheader ], [ %lsr.iv.next, %for.body ]
+ %sum.065 = phi i32 [ %div, %for.body ], [ %n, %for.body.preheader ]
+ %call = tail call i32 @_Z3usei(i32 %0)
+ %div = sdiv i32 %sum.065, %call
+ %lsr.iv.next = add i32 %lsr.iv, -1
+ %exitcond.not = icmp eq i32 %lsr.iv.next, 0
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+define i32 @cant_sink_successive_call(i32 %n) {
+; CHECK-LABEL: cant_sink_successive_call:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w21, -24
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: mov w19, w0
+; CHECK-NEXT: cmp w0, #1 // =1
+; CHECK-NEXT: b.lt .LBB1_3
+; CHECK-NEXT: // %bb.1: // %for.body.preheader
+; CHECK-NEXT: adrp x8, A
+; CHECK-NEXT: ldr w20, [x8, :lo12:A]
+; CHECK-NEXT: mov w0, w19
+; CHECK-NEXT: bl _Z3usei
+; CHECK-NEXT: mov w21, w19
+; CHECK-NEXT: .LBB1_2: // %for.body
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: mov w0, w20
+; CHECK-NEXT: bl _Z3usei
+; CHECK-NEXT: subs w19, w19, #1 // =1
+; CHECK-NEXT: sdiv w21, w21, w0
+; CHECK-NEXT: b.ne .LBB1_2
+; CHECK-NEXT: b .LBB1_4
+; CHECK-NEXT: .LBB1_3:
+; CHECK-NEXT: mov w21, w19
+; CHECK-NEXT: .LBB1_4: // %for.cond.cleanup
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: mov w0, w21
+; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %cmp63 = icmp sgt i32 %n, 0
+ br i1 %cmp63, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %0 = load i32, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @A, i64 0, i64 0), align 4
+ %call0 = tail call i32 @_Z3usei(i32 %n)
+ br label %for.body
+
+for.cond.cleanup:
+ %sum.0.lcssa = phi i32 [ %n, %entry ], [ %div, %for.body ]
+ ret i32 %sum.0.lcssa
+
+for.body:
+ %lsr.iv = phi i32 [ %n, %for.body.preheader ], [ %lsr.iv.next, %for.body ]
+ %sum.065 = phi i32 [ %div, %for.body ], [ %n, %for.body.preheader ]
+ %call = tail call i32 @_Z3usei(i32 %0)
+ %div = sdiv i32 %sum.065, %call
+ %lsr.iv.next = add i32 %lsr.iv, -1
+ %exitcond.not = icmp eq i32 %lsr.iv.next, 0
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+define i32 @cant_sink_successive_store(i32* nocapture readnone %store, i32 %n) {
+; CHECK-LABEL: cant_sink_successive_store:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: stp x30, x21, [sp, #-32]! // 16-byte Folded Spill
+; CHECK-NEXT: stp x20, x19, [sp, #16] // 16-byte Folded Spill
+; CHECK-NEXT: .cfi_def_cfa_offset 32
+; CHECK-NEXT: .cfi_offset w19, -8
+; CHECK-NEXT: .cfi_offset w20, -16
+; CHECK-NEXT: .cfi_offset w21, -24
+; CHECK-NEXT: .cfi_offset w30, -32
+; CHECK-NEXT: mov w19, w1
+; CHECK-NEXT: cmp w1, #1 // =1
+; CHECK-NEXT: b.lt .LBB2_3
+; CHECK-NEXT: // %bb.1: // %for.body.preheader
+; CHECK-NEXT: adrp x8, A
+; CHECK-NEXT: ldr w20, [x8, :lo12:A]
+; CHECK-NEXT: mov w8, #42
+; CHECK-NEXT: mov w21, w19
+; CHECK-NEXT: str w8, [x0]
+; CHECK-NEXT: .LBB2_2: // %for.body
+; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
+; CHECK-NEXT: mov w0, w20
+; CHECK-NEXT: bl _Z3usei
+; CHECK-NEXT: subs w19, w19, #1 // =1
+; CHECK-NEXT: sdiv w21, w21, w0
+; CHECK-NEXT: b.ne .LBB2_2
+; CHECK-NEXT: b .LBB2_4
+; CHECK-NEXT: .LBB2_3:
+; CHECK-NEXT: mov w21, w19
+; CHECK-NEXT: .LBB2_4: // %for.cond.cleanup
+; CHECK-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
+; CHECK-NEXT: mov w0, w21
+; CHECK-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
+; CHECK-NEXT: ret
+entry:
+ %cmp63 = icmp sgt i32 %n, 0
+ br i1 %cmp63, label %for.body.preheader, label %for.cond.cleanup
+
+for.body.preheader:
+ %0 = load i32, i32* getelementptr inbounds ([100 x i32], [100 x i32]* @A, i64 0, i64 0), align 4
+ store i32 42, i32* %store, align 4
+ br label %for.body
+
+for.cond.cleanup:
+ %sum.0.lcssa = phi i32 [ %n, %entry ], [ %div, %for.body ]
+ ret i32 %sum.0.lcssa
+
+for.body:
+ %lsr.iv = phi i32 [ %n, %for.body.preheader ], [ %lsr.iv.next, %for.body ]
+ %sum.065 = phi i32 [ %div, %for.body ], [ %n, %for.body.preheader ]
+ %call = tail call i32 @_Z3usei(i32 %0)
+ %div = sdiv i32 %sum.065, %call
+ %lsr.iv.next = add i32 %lsr.iv, -1
+ %exitcond.not = icmp eq i32 %lsr.iv.next, 0
+ br i1 %exitcond.not, label %for.cond.cleanup, label %for.body
+}
+
+declare i32 @_Z3usei(i32)
More information about the llvm-branch-commits
mailing list