[PATCH] D119844: [MemoryDependency] Relax the re-ordering of atomic store and unordered load/store
Serguei Katkov via Phabricator via llvm-commits
llvm-commits at lists.llvm.org
Tue Feb 15 07:35:00 PST 2022
skatkov created this revision.
skatkov added reviewers: reames, fhahn, efriedma, anna.
Herald added subscribers: bmahjour, hiraditya.
skatkov requested review of this revision.
Herald added a project: LLVM.
An atomic store with release semantics allows re-ordering of subsequent unordered loads/stores to before the store.
Implement it.
https://reviews.llvm.org/D119844
Files:
llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll
Index: llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll
===================================================================
--- /dev/null
+++ llvm/test/Analysis/MemoryDependenceAnalysis/reorder-over-store-atomic.ll
@@ -0,0 +1,31 @@
+; RUN: opt -S -gvn -basic-aa < %s | FileCheck %s
+
+; Check re-ordering of unordered loads across seq_cst atomic operations:
+; a seq_cst atomic store does not prevent it, while a seq_cst atomic load does.
+
+%union.anon = type { i32 }
+
+ at u = global i32 5, align 4
+ at w = global i32 10, align 4
+
+define i32 @test_load() {
+; CHECK-LABEL: @test_load(
+; CHECK: unordered
+; CHECK: unordered
+; CHECK: ret i32 %res
+; Both unordered loads of @w must survive: a seq_cst load of @u sits between
+; them, so GVN must not forward %l1 to %l2 across it (the CHECK lines above
+; require two "unordered" loads and a non-constant result). NOTE(review):
+; presumably this is because the seq_cst load acts as an acquire barrier —
+; confirm against the patch's MemoryDependenceAnalysis change.
+ %l1 = load atomic i32, i32* @w unordered, align 4
+ %lv = load atomic i32, i32* @u seq_cst, align 4
+ %l2 = load atomic i32, i32* @w unordered, align 4
+ %res.1 = sub i32 %l1, %l2
+ %res = add i32 %res.1, %lv
+ ret i32 %res
+}
+
+define i32 @test_store(i32 %x) {
+; CHECK-LABEL: @test_store(
+; CHECK: ret i32 0
+; The unordered loads of @w straddle a seq_cst store to @u. With this patch,
+; GVN may forward %l1 to %l2 across the atomic store (per the patch
+; description, release semantics allow re-ordering of unordered accesses to
+; before the store), so %res folds to 0 as required by the CHECK line above.
+ %l1 = load atomic i32, i32* @w unordered, align 4
+ store atomic i32 %x, i32* @u seq_cst, align 4
+ %l2 = load atomic i32, i32* @w unordered, align 4
+ %res = sub i32 %l1, %l2
+ ret i32 %res
+}
\ No newline at end of file
Index: llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
===================================================================
--- llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
+++ llvm/lib/Analysis/MemoryDependenceAnalysis.cpp
@@ -424,6 +424,16 @@
return false;
};
+ // Return "true" if and only if the instruction I is either a non-unordered
+ // load or a non-unordered store.
+ auto isNonUnorderedLoadOrStore = [](Instruction *I) -> bool {
+ if (auto *LI = dyn_cast<LoadInst>(I))
+ return !LI->isUnordered();
+ if (auto *SI = dyn_cast<StoreInst>(I))
+ return !SI->isUnordered();
+ return false;
+ };
+
// Return "true" if I is not a load and not a store, but it does access
// memory.
auto isOtherMemAccess = [](Instruction *I) -> bool {
@@ -549,11 +559,15 @@
// A Monotonic store is OK if the query inst is itself not atomic.
// FIXME: This is overly conservative.
if (!SI->isUnordered() && SI->isAtomic()) {
- if (!QueryInst || isNonSimpleLoadOrStore(QueryInst) ||
+ if (!QueryInst || isNonUnorderedLoadOrStore(QueryInst) ||
isOtherMemAccess(QueryInst))
return MemDepResult::getClobber(SI);
- if (SI->getOrdering() != AtomicOrdering::Monotonic)
- return MemDepResult::getClobber(SI);
+ // Ok, if we are here the guard above guarantees that
+ // SI is atomic with monotonic or release semantics and
+ // QueryInst is a non-atomic or unordered load/store.
+ // Monotonic and release semantics allow re-ordering before the store,
+ // so we are safe to go further and check the aliasing. It will prohibit
+ // re-ordering in case the locations may- or must-alias.
}
// FIXME: this is overly conservative.
-------------- next part --------------
A non-text attachment was scrubbed...
Name: D119844.408870.patch
Type: text/x-patch
Size: 2911 bytes
Desc: not available
URL: <http://lists.llvm.org/pipermail/llvm-commits/attachments/20220215/7d99df2d/attachment.bin>
More information about the llvm-commits
mailing list