[llvm] 194899c - [MemoryDependency] Relax the re-ordering of atomic store and unordered load/store

Serguei Katkov via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 16 20:20:28 PST 2022


Author: Serguei Katkov
Date: 2022-02-17T10:53:25+07:00
New Revision: 194899caef241fe3b61be092fd5dd81bfd2c3975

URL: https://github.com/llvm/llvm-project/commit/194899caef241fe3b61be092fd5dd81bfd2c3975
DIFF: https://github.com/llvm/llvm-project/commit/194899caef241fe3b61be092fd5dd81bfd2c3975.diff

LOG: [MemoryDependency] Relax the re-ordering of atomic store and unordered load/store

An atomic store with release semantics allows unordered loads and stores to be re-ordered before the store. Implement it.
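
A minimal LLVM IR sketch of the pattern this unlocks (it mirrors the updated tests below; the function name is illustrative, not part of the commit):

    @u = global i32 0
    @w = global i32 0

    define i32 @release_store_example(i32 %x) {
      %l1 = load atomic i32, i32* @w unordered, align 4
      ; The release store to @u no longer clobbers @w, so the second load
      ; may be re-ordered above the store and forwarded from %l1.
      store atomic i32 %x, i32* @u release, align 4
      %l2 = load atomic i32, i32* @w unordered, align 4
      %res = sub i32 %l1, %l2   ; GVN can now fold this to 0
      ret i32 %res
    }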

Reviewers: reames
Reviewed By: reames
Subscribers: llvm-commits
Differential Revision: https://reviews.llvm.org/D119844

Added: 
    

Modified: 
    llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
    llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
index a4491f481c0fb..aaeba903f43df 100644
--- a/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ b/llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -424,6 +424,16 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
     return false;
   };
 
+  // Return "true" if and only if the instruction I is either a non-unordered
+  // load or a non-unordered store.
+  auto isNonUnorderedLoadOrStore = [](Instruction *I) -> bool {
+    if (auto *LI = dyn_cast<LoadInst>(I))
+      return !LI->isUnordered();
+    if (auto *SI = dyn_cast<StoreInst>(I))
+      return !SI->isUnordered();
+    return false;
+  };
+
   // Return "true" if I is not a load and not a store, but it does access
   // memory.
   auto isOtherMemAccess = [](Instruction *I) -> bool {
@@ -549,11 +559,18 @@ MemDepResult MemoryDependenceResults::getSimplePointerDependencyFrom(
       // A Monotonic store is OK if the query inst is itself not atomic.
       // FIXME: This is overly conservative.
       if (!SI->isUnordered() && SI->isAtomic()) {
-        if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
+        if (!QueryInst || isNonUnorderedLoadOrStore(QueryInst) ||
             isOtherMemAccess(QueryInst))
           return MemDepResult::getClobber(SI);
-        if (SI->getOrdering() != AtomicOrdering::Monotonic)
-          return MemDepResult::getClobber(SI);
+        // Ok, if we are here, the guard above guarantees that
+        // QueryInst is a non-atomic or unordered load/store.
+        // SI is atomic with monotonic or release semantics (seq_cst for a
+        // store is effectively release semantics plus a total order over
+        // other seq_cst instructions; since QueryInst is not seq_cst, we can
+        // treat the store as a plain release).
+        // Monotonic and release semantics allow re-ordering before the store,
+        // so it is safe to go further and check the aliasing. That check
+        // prohibits the re-ordering if the locations may- or must-alias.
       }
 
       // While volatile access cannot be eliminated, they do not have to clobber
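
For contrast, a minimal IR sketch of the monotonic case mentioned in the comment above; the old code already let this through (the removed check only returned a clobber for non-monotonic orderings), and the function name is again illustrative:

    @u = global i32 0
    @w = global i32 0

    define i32 @monotonic_store_example(i32 %x) {
      %l1 = load atomic i32, i32* @w unordered, align 4
      ; A monotonic store imposes no ordering on other memory locations,
      ; so it does not block re-ordering of these unordered loads.
      store atomic i32 %x, i32* @u monotonic, align 4
      %l2 = load atomic i32, i32* @w unordered, align 4
      %res = sub i32 %l1, %l2
      ret i32 %res
    }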

diff --git a/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll b/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll
index a06c7e0792792..b6153078816b5 100644
--- a/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll
+++ b/llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll
@@ -40,11 +40,8 @@ define i32 @test_load_acquire_unordered() {
 
 define i32 @test_store_cst_unordered(i32 %x) {
 ; CHECK-LABEL: @test_store_cst_unordered(
-; CHECK-NEXT:    [[L1:%.*]] = load atomic i32, i32* @w unordered, align 4
 ; CHECK-NEXT:    store atomic i32 [[X:%.*]], i32* @u seq_cst, align 4
-; CHECK-NEXT:    [[L2:%.*]] = load atomic i32, i32* @w unordered, align 4
-; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[L1]], [[L2]]
-; CHECK-NEXT:    ret i32 [[RES]]
+; CHECK-NEXT:    ret i32 0
 ;
   %l1 = load atomic i32, i32* @w unordered, align 4
   store atomic i32 %x, i32* @u seq_cst, align 4
@@ -55,11 +52,8 @@ define i32 @test_store_cst_unordered(i32 %x) {
 
 define i32 @test_store_release_unordered(i32 %x) {
 ; CHECK-LABEL: @test_store_release_unordered(
-; CHECK-NEXT:    [[L1:%.*]] = load atomic i32, i32* @w unordered, align 4
 ; CHECK-NEXT:    store atomic i32 [[X:%.*]], i32* @u release, align 4
-; CHECK-NEXT:    [[L2:%.*]] = load atomic i32, i32* @w unordered, align 4
-; CHECK-NEXT:    [[RES:%.*]] = sub i32 [[L1]], [[L2]]
-; CHECK-NEXT:    ret i32 [[RES]]
+; CHECK-NEXT:    ret i32 0
 ;
   %l1 = load atomic i32, i32* @w unordered, align 4
   store atomic i32 %x, i32* @u release, align 4

