[llvm] r215942 - Slightly relax the constraints on atomics in MemoryDependenceAnalysis

Robin Morisset morisset at google.com
Mon Aug 18 15:18:12 PDT 2014


Author: morisset
Date: Mon Aug 18 17:18:11 2014
New Revision: 215942

URL: http://llvm.org/viewvc/llvm-project?rev=215942&view=rev
Log:
Slightly relax the constraints on atomics in MemoryDependenceAnalysis

Monotonic accesses do not have to kill the analysis, as long as the QueryInst is not
itself atomic.
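
For illustration, a minimal sketch of the dead-store elimination this enables, mirroring
the new tests added to atomic.ll below (@x and @y are the i32 globals already used by
that test file; run through DSE, e.g. "opt -dse"):

  ; cf. test9 below: the first (unordered) store to @x can now be removed,
  ; because the monotonic load touches a different location and the query
  ; instruction (the store being eliminated) is not itself atomic.
  store i32 0, i32* @x
  %v = load atomic i32* @y monotonic, align 4
  store i32 1, i32* @x
  ; cf. test11 below: if the stores to @x were themselves monotonic, the
  ; monotonic load would still be reported as a clobber and nothing is removed.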

Modified:
    llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp
    llvm/trunk/test/Transforms/DeadStoreElimination/atomic.ll

Modified: llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp?rev=215942&r1=215941&r2=215942&view=diff
==============================================================================
--- llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp (original)
+++ llvm/trunk/lib/Analysis/MemoryDependenceAnalysis.cpp Mon Aug 18 17:18:11 2014
@@ -409,9 +409,18 @@ getPointerDependencyFrom(const AliasAnal
     // a load depends on another must aliased load from the same value.
     if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
       // Atomic loads have complications involved.
+      // A monotonic load is OK if the query inst is itself not atomic.
       // FIXME: This is overly conservative.
-      if (!LI->isUnordered())
-        return MemDepResult::getClobber(LI);
+      if (!LI->isUnordered()) {
+        if (!QueryInst || LI->getOrdering() != Monotonic)
+          return MemDepResult::getClobber(LI);
+        if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst))
+          if (!QueryLI->isUnordered())
+            return MemDepResult::getClobber(LI);
+        if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst))
+          if (!QuerySI->isUnordered())
+            return MemDepResult::getClobber(LI);
+      }
 
       AliasAnalysis::Location LoadLoc = AA->getLocation(LI);
 
@@ -469,9 +478,18 @@ getPointerDependencyFrom(const AliasAnal
 
     if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
       // Atomic stores have complications involved.
+      // A monotonic store is OK if the query inst is itself not atomic.
       // FIXME: This is overly conservative.
-      if (!SI->isUnordered())
-        return MemDepResult::getClobber(SI);
+      if (!SI->isUnordered()) {
+        if (!QueryInst || SI->getOrdering() != Monotonic)
+          return MemDepResult::getClobber(SI);
+        if (auto *QueryLI = dyn_cast<LoadInst>(QueryInst))
+          if (!QueryLI->isUnordered())
+            return MemDepResult::getClobber(SI);
+        if (auto *QuerySI = dyn_cast<StoreInst>(QueryInst))
+          if (!QuerySI->isUnordered())
+            return MemDepResult::getClobber(SI);
+      }
 
       // If alias analysis can tell that this store is guaranteed to not modify
       // the query pointer, ignore it.  Use getModRefInfo to handle cases where

Modified: llvm/trunk/test/Transforms/DeadStoreElimination/atomic.ll
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/test/Transforms/DeadStoreElimination/atomic.ll?rev=215942&r1=215941&r2=215942&view=diff
==============================================================================
--- llvm/trunk/test/Transforms/DeadStoreElimination/atomic.ll (original)
+++ llvm/trunk/test/Transforms/DeadStoreElimination/atomic.ll Mon Aug 18 17:18:11 2014
@@ -105,3 +105,50 @@ entry:
   ret i32 %x
 }
 
+; DSE across monotonic load (allowed as long as the eliminated store isUnordered)
+define i32 @test9()  nounwind uwtable ssp {
+; CHECK: test9
+; CHECK-NOT: store i32 0
+; CHECK: store i32 1
+entry:
+  store i32 0, i32* @x
+  %x = load atomic i32* @y monotonic, align 4
+  store i32 1, i32* @x
+  ret i32 %x
+}
+
+; DSE across monotonic store (allowed as long as the eliminated store isUnordered)
+define void @test10()  nounwind uwtable ssp {
+; CHECK: test10
+; CHECK-NOT: store i32 0
+; CHECK: store i32 1
+entry:
+  store i32 0, i32* @x
+  store atomic i32 42, i32* @y monotonic, align 4
+  store i32 1, i32* @x
+  ret void
+}
+
+; DSE across monotonic load (forbidden since the eliminated store is atomic)
+define i32 @test11()  nounwind uwtable ssp {
+; CHECK: test11
+; CHECK: store atomic i32 0
+; CHECK: store atomic i32 1
+entry:
+  store atomic i32 0, i32* @x monotonic, align 4
+  %x = load atomic i32* @y monotonic, align 4
+  store atomic i32 1, i32* @x monotonic, align 4
+  ret i32 %x
+}
+
+; DSE across monotonic store (forbidden since the eliminated store is atomic)
+define void @test12()  nounwind uwtable ssp {
+; CHECK: test12
+; CHECK: store atomic i32 0
+; CHECK: store atomic i32 1
+entry:
+  store atomic i32 0, i32* @x monotonic, align 4
+  store atomic i32 42, i32* @y monotonic, align 4
+  store atomic i32 1, i32* @x monotonic, align 4
+  ret void
+}