[llvm] r268804 - [GVN] PRE of unordered loads
Philip Reames via llvm-commits
llvm-commits at lists.llvm.org
Fri May 6 14:43:52 PDT 2016
Author: reames
Date: Fri May 6 16:43:51 2016
New Revision: 268804
URL: http://llvm.org/viewvc/llvm-project?rev=268804&view=rev
Log:
[GVN] PRE of unordered loads
Again, fairly simple. Only change is ensuring that we actually copy the properties of the load correctly. The aliasing legality constraints were already handled by the FRE patches. There's nothing special about unordered atomics from the perspective of the PRE algorithm itself.
Modified:
llvm/trunk/lib/Transforms/Scalar/GVN.cpp
llvm/trunk/test/Transforms/GVN/atomic.ll
Modified: llvm/trunk/lib/Transforms/Scalar/GVN.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Transforms/Scalar/GVN.cpp?rev=268804&r1=268803&r2=268804&view=diff
==============================================================================
--- llvm/trunk/lib/Transforms/Scalar/GVN.cpp (original)
+++ llvm/trunk/lib/Transforms/Scalar/GVN.cpp Fri May 6 16:43:51 2016
@@ -1552,9 +1552,10 @@ bool GVN::PerformLoadPRE(LoadInst *LI, A
BasicBlock *UnavailablePred = PredLoad.first;
Value *LoadPtr = PredLoad.second;
- Instruction *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre", false,
- LI->getAlignment(),
- UnavailablePred->getTerminator());
+ auto *NewLoad = new LoadInst(LoadPtr, LI->getName()+".pre",
+ LI->isVolatile(), LI->getAlignment(),
+ LI->getOrdering(), LI->getSynchScope(),
+ UnavailablePred->getTerminator());
// Transfer the old load's AA tags to the new load.
AAMDNodes Tags;
@@ -1664,11 +1665,6 @@ bool GVN::processNonLocalLoad(LoadInst *
return true;
}
- // This code hasn't been audited for atomic, ordered, or volatile memory
- // access.
- if (!LI->isSimple())
- return false;
-
// Step 4: Eliminate partial redundancy.
if (!EnablePRE || !EnableLoadPRE)
return false;
Modified: llvm/trunk/test/Transforms/GVN/atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/GVN/atomic.ll?rev=268804&r1=268803&r2=268804&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/GVN/atomic.ll (original)
+++ llvm/trunk/test/Transforms/GVN/atomic.ll Fri May 6 16:43:51 2016
@@ -392,3 +392,112 @@ next:
%res = sub i32 %a, %b
ret i32 %res
}
+
+declare void @clobber()
+
+; unordered atomic to unordered atomic
+define i32 @non_local_pre(i32* %P1) {
+; CHECK-LABEL: @non_local_pre(
+; CHECK: load atomic i32, i32* %P1 unordered
+; CHECK: load atomic i32, i32* %P1 unordered
+; CHECK: %b = phi i32 [ %b.pre, %early ], [ %a, %0 ]
+; CHECK: ret i32 %b
+ %a = load atomic i32, i32* %P1 unordered, align 4
+ %cmp = icmp eq i32 %a, 0
+ br i1 %cmp, label %early, label %next
+early:
+ call void @clobber()
+ br label %next
+next:
+ %b = load atomic i32, i32* %P1 unordered, align 4
+ ret i32 %b
+}
+
+; unordered atomic to non-atomic
+define i32 @non_local_pre2(i32* %P1) {
+; CHECK-LABEL: @non_local_pre2(
+; CHECK: load atomic i32, i32* %P1 unordered
+; CHECK: load i32, i32* %P1
+; CHECK: %b = phi i32 [ %b.pre, %early ], [ %a, %0 ]
+; CHECK: ret i32 %b
+ %a = load atomic i32, i32* %P1 unordered, align 4
+ %cmp = icmp eq i32 %a, 0
+ br i1 %cmp, label %early, label %next
+early:
+ call void @clobber()
+ br label %next
+next:
+ %b = load i32, i32* %P1
+ ret i32 %b
+}
+
+; non-atomic to unordered atomic - can't forward!
+define i32 @non_local_pre3(i32* %P1) {
+; CHECK-LABEL: @non_local_pre3(
+; CHECK: %a = load i32, i32* %P1
+; CHECK: %b = load atomic i32, i32* %P1 unordered
+; CHECK: ret i32 %b
+ %a = load i32, i32* %P1
+ %cmp = icmp eq i32 %a, 0
+ br i1 %cmp, label %early, label %next
+early:
+ call void @clobber()
+ br label %next
+next:
+ %b = load atomic i32, i32* %P1 unordered, align 4
+ ret i32 %b
+}
+
+; ordered atomic to ordered atomic - can't forward
+define i32 @non_local_pre4(i32* %P1) {
+; CHECK-LABEL: @non_local_pre4(
+; CHECK: %a = load atomic i32, i32* %P1 seq_cst
+; CHECK: %b = load atomic i32, i32* %P1 seq_cst
+; CHECK: ret i32 %b
+ %a = load atomic i32, i32* %P1 seq_cst, align 4
+ %cmp = icmp eq i32 %a, 0
+ br i1 %cmp, label %early, label %next
+early:
+ call void @clobber()
+ br label %next
+next:
+ %b = load atomic i32, i32* %P1 seq_cst, align 4
+ ret i32 %b
+}
+
+; can't remove volatile on any path
+define i32 @non_local_pre5(i32* %P1) {
+; CHECK-LABEL: @non_local_pre5(
+; CHECK: %a = load atomic i32, i32* %P1 seq_cst
+; CHECK: %b = load volatile i32, i32* %P1
+; CHECK: ret i32 %b
+ %a = load atomic i32, i32* %P1 seq_cst, align 4
+ %cmp = icmp eq i32 %a, 0
+ br i1 %cmp, label %early, label %next
+early:
+ call void @clobber()
+ br label %next
+next:
+ %b = load volatile i32, i32* %P1
+ ret i32 %b
+}
+
+
+; ordered atomic to unordered atomic
+define i32 @non_local_pre6(i32* %P1) {
+; CHECK-LABEL: @non_local_pre6(
+; CHECK: load atomic i32, i32* %P1 seq_cst
+; CHECK: load atomic i32, i32* %P1 unordered
+; CHECK: %b = phi i32 [ %b.pre, %early ], [ %a, %0 ]
+; CHECK: ret i32 %b
+ %a = load atomic i32, i32* %P1 seq_cst, align 4
+ %cmp = icmp eq i32 %a, 0
+ br i1 %cmp, label %early, label %next
+early:
+ call void @clobber()
+ br label %next
+next:
+ %b = load atomic i32, i32* %P1 unordered, align 4
+ ret i32 %b
+}
+
More information about the llvm-commits
mailing list