[llvm] r349338 - [AArch64] Re-run load/store optimizer after aggressive tail duplication
Alexandros Lamprineas via llvm-commits
llvm-commits at lists.llvm.org
Mon Dec 17 02:45:43 PST 2018
Author: alelab01
Date: Mon Dec 17 02:45:43 2018
New Revision: 349338
URL: http://llvm.org/viewvc/llvm-project?rev=349338&view=rev
Log:
[AArch64] Re-run load/store optimizer after aggressive tail duplication
The Load/Store Optimizer runs before Machine Block Placement. At O3 the
Tail Duplication Threshold is set to 4 instructions, and the resulting
tail duplication can create new opportunities for the Load/Store
Optimizer. It seems worthwhile to run it once again.
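For illustration only (not part of the patch), the C source below is a rough
analogue of the new ldst-opt-after-block-placement.ll test added here: both
branches fall into a small common tail that compares against ptr[1], tail
duplication at -O3 copies that tail into each predecessor, and the re-run
load/store optimizer can then pair the now-adjacent accesses to ptr[1] and
ptr[2] into a single ldp, as the CHECK lines below expect.

#include <stdint.h>

/* Rough C analogue of @foo in the added test. After tail duplication
 * copies the final comparison into both branches, the 'then' path ends
 * up loading ptr[1] and ptr[2] back to back, which the load/store
 * optimizer can merge into one ldp when it runs again after block
 * placement. */
void foo(int cond, int64_t *ptr) {
  int64_t v;
  if (cond) {
    v = ptr[2];
    ptr[2] = 0;
  } else {
    v = ptr[0];
  }
  /* Small common tail, below the O3 tail duplication threshold. */
  if (v < ptr[1])
    ptr[1] = 0;
}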
Added:
llvm/trunk/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll
Modified:
llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp
llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll
Modified: llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp?rev=349338&r1=349337&r2=349338&view=diff
==============================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp (original)
+++ llvm/trunk/lib/Target/AArch64/AArch64TargetMachine.cpp Mon Dec 17 02:45:43 2018
@@ -556,6 +556,12 @@ void AArch64PassConfig::addPreSched2() {
}
void AArch64PassConfig::addPreEmitPass() {
+ // Machine Block Placement might have created new opportunities when run
+ // at O3, where the Tail Duplication Threshold is set to 4 instructions.
+ // Run the load/store optimizer once more.
+ if (TM->getOptLevel() >= CodeGenOpt::Aggressive && EnableLoadStoreOpt)
+ addPass(createAArch64LoadStoreOptimizationPass());
+
if (EnableA53Fix835769)
addPass(createAArch64A53Fix835769());
// Relax conditional branch instructions if they're otherwise out of
Modified: llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll?rev=349338&r1=349337&r2=349338&view=diff
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll (original)
+++ llvm/trunk/test/CodeGen/AArch64/O3-pipeline.ll Mon Dec 17 02:45:43 2018
@@ -154,6 +154,7 @@
; CHECK-NEXT: Machine Block Frequency Analysis
; CHECK-NEXT: MachinePostDominator Tree Construction
; CHECK-NEXT: Branch Probability Basic Block Placement
+; CHECK-NEXT: AArch64 load / store optimization pass
; CHECK-NEXT: Branch relaxation pass
; CHECK-NEXT: AArch64 Branch Targets
; CHECK-NEXT: AArch64 Compress Jump Tables
Added: llvm/trunk/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll?rev=349338&view=auto
==============================================================================
--- llvm/trunk/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll (added)
+++ llvm/trunk/test/CodeGen/AArch64/ldst-opt-after-block-placement.ll Mon Dec 17 02:45:43 2018
@@ -0,0 +1,51 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -O3 -mtriple=aarch64-arm < %s | FileCheck %s
+
+; Run at O3 to make sure we can optimize load/store instructions after Machine
+; Block Placement takes place using Tail Duplication Threshold = 4.
+
+define void @foo(i1 %cond, i64* %ptr) {
+; CHECK-LABEL: foo:
+; CHECK: // %bb.0: // %entry
+; CHECK-NEXT: tbz w0, #0, .LBB0_2
+; CHECK-NEXT: // %bb.1: // %if.then
+; CHECK-NEXT: ldp x9, x8, [x1, #8]
+; CHECK-NEXT: str xzr, [x1, #16]
+; CHECK-NEXT: cmp x8, x9
+; CHECK-NEXT: b.lt .LBB0_3
+; CHECK-NEXT: b .LBB0_4
+; CHECK-NEXT: .LBB0_2: // %if.else
+; CHECK-NEXT: ldp x8, x9, [x1]
+; CHECK-NEXT: cmp x8, x9
+; CHECK-NEXT: b.ge .LBB0_4
+; CHECK-NEXT: .LBB0_3: // %exit1
+; CHECK-NEXT: str xzr, [x1, #8]
+; CHECK-NEXT: .LBB0_4: // %exit2
+; CHECK-NEXT: ret
+entry:
+ br i1 %cond, label %if.then, label %if.else
+
+if.then:
+ %0 = getelementptr inbounds i64, i64* %ptr, i64 2
+ %1 = load i64, i64* %0, align 8
+ store i64 0, i64* %0, align 8
+ br label %if.end
+
+if.else:
+ %2 = load i64, i64* %ptr, align 8
+ br label %if.end
+
+if.end:
+ %3 = phi i64 [ %1, %if.then ], [ %2, %if.else ]
+ %4 = getelementptr inbounds i64, i64* %ptr, i64 1
+ %5 = load i64, i64* %4, align 8
+ %6 = icmp slt i64 %3, %5
+ br i1 %6, label %exit1, label %exit2
+
+exit1:
+ store i64 0, i64* %4, align 8
+ ret void
+
+exit2:
+ ret void
+}