[llvm] [AA] Improve precision for monotonic atomic load/store operations (PR #158169)
Jin Huang via llvm-commits
llvm-commits at lists.llvm.org
Tue Sep 23 12:26:27 PDT 2025
https://github.com/jinhuang1102 updated https://github.com/llvm/llvm-project/pull/158169
>From 05c40eb40c97449ccb0e8b7a39b78ed2612242f1 Mon Sep 17 00:00:00 2001
From: Jin Huang <jingold at google.com>
Date: Thu, 11 Sep 2025 17:40:11 +0000
Subject: [PATCH 1/2] [AA] A conservative fix for atomic instruction.
---
llvm/lib/Analysis/AliasAnalysis.cpp | 15 ++++++++++++---
.../DeadStoreElimination/atomic-todo.ll | 11 +++++++++--
2 files changed, 21 insertions(+), 5 deletions(-)
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index 3ec009ca4adde..cb9577aac405a 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -421,9 +421,18 @@ ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
const MemoryLocation &Loc,
AAQueryInfo &AAQI) {
// Be conservative in the face of atomic.
- if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered))
+ if (isStrongerThan(L->getOrdering(), AtomicOrdering::Monotonic))
return ModRefInfo::ModRef;
+ // For Monotonic and unordered atomic loads, if the locations are not NoAlias,
+ // we must be conservative and return ModRef to prevent unsafe reordering of
+ // accesses to the same memory.
+ if (L->isAtomic()){
+ if (Loc.Ptr &&
+ alias(MemoryLocation::get(L), Loc, AAQI, L) != AliasResult::NoAlias)
+ return ModRefInfo::ModRef;
+ }
+
// If the load address doesn't alias the given address, it doesn't read
// or write the specified memory.
if (Loc.Ptr) {
@@ -439,7 +448,7 @@ ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
const MemoryLocation &Loc,
AAQueryInfo &AAQI) {
// Be conservative in the face of atomic.
- if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
+ if (isStrongerThan(S->getOrdering(), AtomicOrdering::Monotonic))
return ModRefInfo::ModRef;
if (Loc.Ptr) {
@@ -458,7 +467,7 @@ ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
}
// Otherwise, a store just writes.
- return ModRefInfo::Mod;
+ return ModRefInfo::ModRef;
}
ModRefInfo AAResults::getModRefInfo(const FenceInst *S,
diff --git a/llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll b/llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll
index 1c160442f8579..16f8a52b00eab 100644
--- a/llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll
@@ -1,4 +1,3 @@
-; XFAIL: *
; RUN: opt -passes=dse -S < %s | FileCheck %s
target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
@@ -11,7 +10,7 @@ target triple = "x86_64-apple-macosx10.7.0"
@x = common global i32 0, align 4
@y = common global i32 0, align 4
-; DSE across monotonic load (allowed as long as the eliminated store isUnordered)
+; DSE across monotonic load (allowed if the monotonic load's address is NoAlias)
define i32 @test9() {
; CHECK-LABEL: test9
; CHECK-NOT: store i32 0
@@ -21,3 +20,11 @@ define i32 @test9() {
store i32 1, ptr @x
ret i32 %x
}
+
+; DSE across monotonic load (blocked if the atomic load's address isn't NoAlias)
+define i32 @test9a() {
+ store i32 0, ptr @x
+ %x = load atomic i32, ptr @ptr monotonic, align 4
+ store i32 1, ptr @x
+ ret i32 %x
+}
>From 7b32cf31ce31536c9b490bbe1fbd41fe792c726a Mon Sep 17 00:00:00 2001
From: Jin Huang <jingold at google.com>
Date: Fri, 12 Sep 2025 00:06:37 +0000
Subject: [PATCH 2/2] [AA] Improve precision for monotonic atomic load/store
operations
---
llvm/lib/Analysis/AliasAnalysis.cpp | 26 +++++++++-------
.../DeadStoreElimination/atomic-todo.ll | 30 -------------------
.../Transforms/DeadStoreElimination/atomic.ll | 25 ++++++++++++++++
3 files changed, 40 insertions(+), 41 deletions(-)
delete mode 100644 llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index cb9577aac405a..8d961f23503aa 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -424,15 +424,6 @@ ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
if (isStrongerThan(L->getOrdering(), AtomicOrdering::Monotonic))
return ModRefInfo::ModRef;
- // For Monotonic and unordered atomic loads, if the locations are not NoAlias,
- // we must be conservative and return ModRef to prevent unsafe reordering of
- // accesses to the same memory.
- if (L->isAtomic()){
- if (Loc.Ptr &&
- alias(MemoryLocation::get(L), Loc, AAQI, L) != AliasResult::NoAlias)
- return ModRefInfo::ModRef;
- }
-
// If the load address doesn't alias the given address, it doesn't read
// or write the specified memory.
if (Loc.Ptr) {
@@ -440,6 +431,13 @@ ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
if (AR == AliasResult::NoAlias)
return ModRefInfo::NoModRef;
}
+
+ assert(!isStrongerThanMonotonic(L->getOrdering()) &&
+ "Stronger atomic orderings should have been handled above!");
+
+ if (isStrongerThanUnordered(L->getOrdering()))
+ return ModRefInfo::ModRef;
+
// Otherwise, a load just reads.
return ModRefInfo::Ref;
}
@@ -466,8 +464,14 @@ ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
return ModRefInfo::NoModRef;
}
- // Otherwise, a store just writes.
- return ModRefInfo::ModRef;
+ assert(!isStrongerThanMonotonic(S->getOrdering()) &&
+ "Stronger atomic orderings should have been handled above!" );
+
+ if (isStrongerThanUnordered(S->getOrdering()))
+ return ModRefInfo::ModRef;
+
+ // A store just writes.
+ return ModRefInfo::Mod;
}
ModRefInfo AAResults::getModRefInfo(const FenceInst *S,
diff --git a/llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll b/llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll
deleted file mode 100644
index 16f8a52b00eab..0000000000000
--- a/llvm/test/Transforms/DeadStoreElimination/atomic-todo.ll
+++ /dev/null
@@ -1,30 +0,0 @@
-; RUN: opt -passes=dse -S < %s | FileCheck %s
-
-target datalayout = "e-p:64:64:64-i1:8:8-i8:8:8-i16:16:16-i32:32:32-i64:64:64-f32:32:32-f64:64:64-v64:64:64-v128:128:128-a0:0:64-s0:64:64-f80:128:128-n8:16:32:64"
-target triple = "x86_64-apple-macosx10.7.0"
-
-; Basic correctness tests for atomic stores.
-; Note that it turns out essentially every transformation DSE does is legal on
-; atomic ops, just some transformations are not allowed across release-acquire pairs.
-
- at x = common global i32 0, align 4
- at y = common global i32 0, align 4
-
-; DSE across monotonic load (allowed if the monotonic load's address is NoAlias)
-define i32 @test9() {
-; CHECK-LABEL: test9
-; CHECK-NOT: store i32 0
-; CHECK: store i32 1
- store i32 0, ptr @x
- %x = load atomic i32, ptr @y monotonic, align 4
- store i32 1, ptr @x
- ret i32 %x
-}
-
-; DSE across monotonic load (blocked if the atomic load's address isn't NoAlias)
-define i32 @test9a() {
- store i32 0, ptr @x
- %x = load atomic i32, ptr @ptr monotonic, align 4
- store i32 1, ptr @x
- ret i32 %x
-}
diff --git a/llvm/test/Transforms/DeadStoreElimination/atomic.ll b/llvm/test/Transforms/DeadStoreElimination/atomic.ll
index 55b9384e88d93..67a6e9dd6c7e8 100644
--- a/llvm/test/Transforms/DeadStoreElimination/atomic.ll
+++ b/llvm/test/Transforms/DeadStoreElimination/atomic.ll
@@ -88,6 +88,31 @@ define i32 @test8() {
ret i32 %x
}
+; DSE across monotonic load (allowed if the monotonic load's address is NoAlias)
+define i32 @test9() {
+; CHECK-LABEL: test9
+; CHECK-NOT: store i32 0
+; CHECK: store i32 1
+ store i32 0, ptr @x
+ %x = load atomic i32, ptr @y monotonic, align 4
+ store i32 1, ptr @x
+ ret i32 %x
+}
+
+; DSE across monotonic load (blocked if the atomic load's address isn't NoAlias)
+define i32 @test9a(ptr %ptr) {
+; CHECK-LABEL: @test9a(
+; CHECK-NEXT: store i32 0, ptr @x, align 4
+; CHECK-NEXT: [[X:%.*]] = load atomic i32, ptr [[PTR:%.*]] monotonic, align 4
+; CHECK-NEXT: store i32 1, ptr @x, align 4
+; CHECK-NEXT: ret i32 [[X]]
+;
+ store i32 0, ptr @x
+ %x = load atomic i32, ptr %ptr monotonic, align 4
+ store i32 1, ptr @x
+ ret i32 %x
+}
+
; DSE across monotonic store (allowed as long as the eliminated store isUnordered)
define void @test10() {
; CHECK-LABEL: test10
More information about the llvm-commits
mailing list