[llvm] [AA] A conservative fix for atomic store instruction. (PR #155032)
Jin Huang via llvm-commits
llvm-commits at lists.llvm.org
Wed Sep 3 23:00:32 PDT 2025
https://github.com/jinhuang1102 updated https://github.com/llvm/llvm-project/pull/155032
>From bc2541e22fb0dfe097495008b927aa26f2ce7f0f Mon Sep 17 00:00:00 2001
From: Jin Huang <jingold at google.com>
Date: Fri, 22 Aug 2025 21:27:27 +0000
Subject: [PATCH] [AA] A conservative fix for atomic store instruction. The
current atomic store check returns every atomic store as a modifying instruction
without any memory check. This PR raises the threshold from Unordered to Monotonic.
---
llvm/lib/Analysis/AliasAnalysis.cpp | 33 ++++---
llvm/unittests/Analysis/AliasAnalysisTest.cpp | 99 ++++++++++++++++---
2 files changed, 109 insertions(+), 23 deletions(-)
diff --git a/llvm/lib/Analysis/AliasAnalysis.cpp b/llvm/lib/Analysis/AliasAnalysis.cpp
index 3ec009ca4adde..b6da0cfa6951b 100644
--- a/llvm/lib/Analysis/AliasAnalysis.cpp
+++ b/llvm/lib/Analysis/AliasAnalysis.cpp
@@ -54,8 +54,8 @@
using namespace llvm;
-STATISTIC(NumNoAlias, "Number of NoAlias results");
-STATISTIC(NumMayAlias, "Number of MayAlias results");
+STATISTIC(NumNoAlias, "Number of NoAlias results");
+STATISTIC(NumMayAlias, "Number of MayAlias results");
STATISTIC(NumMustAlias, "Number of MustAlias results");
/// Allow disabling BasicAA from the AA results. This is particularly useful
@@ -118,8 +118,8 @@ AliasResult AAResults::alias(const MemoryLocation &LocA,
if (EnableAATrace) {
for (unsigned I = 0; I < AAQI.Depth; ++I)
dbgs() << " ";
- dbgs() << "Start " << *LocA.Ptr << " @ " << LocA.Size << ", "
- << *LocB.Ptr << " @ " << LocB.Size << "\n";
+ dbgs() << "Start " << *LocA.Ptr << " @ " << LocA.Size << ", " << *LocB.Ptr
+ << " @ " << LocB.Size << "\n";
}
AAQI.Depth++;
@@ -133,8 +133,8 @@ AliasResult AAResults::alias(const MemoryLocation &LocA,
if (EnableAATrace) {
for (unsigned I = 0; I < AAQI.Depth; ++I)
dbgs() << " ";
- dbgs() << "End " << *LocA.Ptr << " @ " << LocA.Size << ", "
- << *LocB.Ptr << " @ " << LocB.Size << " = " << Result << "\n";
+ dbgs() << "End " << *LocA.Ptr << " @ " << LocA.Size << ", " << *LocB.Ptr
+ << " @ " << LocB.Size << " = " << Result << "\n";
}
if (AAQI.Depth == 0) {
@@ -421,9 +421,18 @@ ModRefInfo AAResults::getModRefInfo(const LoadInst *L,
const MemoryLocation &Loc,
AAQueryInfo &AAQI) {
// Be conservative in the face of atomic.
- if (isStrongerThan(L->getOrdering(), AtomicOrdering::Unordered))
+ if (isStrongerThan(L->getOrdering(), AtomicOrdering::Monotonic))
return ModRefInfo::ModRef;
+ // For Monotonic and unordered loads, we only need to be more conservative
+ // when the locations are MustAlias to prevent unsafe reordering of accesses
+ // to the same memory. For MayAlias, we can treat it as a normal read.
+ if (L->isAtomic()) {
+ if (Loc.Ptr &&
+ alias(MemoryLocation::get(L), Loc, AAQI, L) == AliasResult::MustAlias)
+ return ModRefInfo::ModRef;
+ }
+
// If the load address doesn't alias the given address, it doesn't read
// or write the specified memory.
if (Loc.Ptr) {
@@ -439,7 +448,7 @@ ModRefInfo AAResults::getModRefInfo(const StoreInst *S,
const MemoryLocation &Loc,
AAQueryInfo &AAQI) {
// Be conservative in the face of atomic.
- if (isStrongerThan(S->getOrdering(), AtomicOrdering::Unordered))
+ if (isStrongerThan(S->getOrdering(), AtomicOrdering::Monotonic))
return ModRefInfo::ModRef;
if (Loc.Ptr) {
@@ -538,7 +547,8 @@ ModRefInfo AAResults::getModRefInfo(const AtomicCmpXchgInst *CX,
ModRefInfo AAResults::getModRefInfo(const AtomicRMWInst *RMW,
const MemoryLocation &Loc,
AAQueryInfo &AAQI) {
- // Acquire/Release atomicrmw has properties that matter for arbitrary addresses.
+ // Acquire/Release atomicrmw has properties that matter for arbitrary
+ // addresses.
if (isStrongerThanMonotonic(RMW->getOrdering()))
return ModRefInfo::ModRef;
@@ -600,8 +610,7 @@ ModRefInfo AAResults::getModRefInfo(const Instruction *I,
/// with a smarter AA in place, this test is just wasting compile time.
ModRefInfo AAResults::callCapturesBefore(const Instruction *I,
const MemoryLocation &MemLoc,
- DominatorTree *DT,
- AAQueryInfo &AAQI) {
+ DominatorTree *DT, AAQueryInfo &AAQI) {
if (!DT)
return ModRefInfo::ModRef;
@@ -677,7 +686,7 @@ bool AAResults::canInstructionRangeModRef(const Instruction &I1,
"Instructions not in same basic block!");
BasicBlock::const_iterator I = I1.getIterator();
BasicBlock::const_iterator E = I2.getIterator();
- ++E; // Convert from inclusive to exclusive range.
+ ++E; // Convert from inclusive to exclusive range.
for (; I != E; ++I) // Check every instruction in range
if (isModOrRefSet(getModRefInfo(&*I, Loc) & Mode))
diff --git a/llvm/unittests/Analysis/AliasAnalysisTest.cpp b/llvm/unittests/Analysis/AliasAnalysisTest.cpp
index 06066b1b92c51..a190cafbf237e 100644
--- a/llvm/unittests/Analysis/AliasAnalysisTest.cpp
+++ b/llvm/unittests/Analysis/AliasAnalysisTest.cpp
@@ -26,9 +26,9 @@ using namespace llvm;
// Set up some test passes.
namespace llvm {
-void initializeAATestPassPass(PassRegistry&);
-void initializeTestCustomAAWrapperPassPass(PassRegistry&);
-}
+void initializeAATestPassPass(PassRegistry &);
+void initializeTestCustomAAWrapperPassPass(PassRegistry &);
+} // namespace llvm
namespace {
struct AATestPass : FunctionPass {
@@ -61,7 +61,7 @@ struct AATestPass : FunctionPass {
return false;
}
};
-}
+} // namespace
char AATestPass::ID = 0;
INITIALIZE_PASS_BEGIN(AATestPass, "aa-test-pas", "Alias Analysis Test Pass",
@@ -90,7 +90,7 @@ struct TestCustomAAResult : AAResultBase {
return AliasResult::MayAlias;
}
};
-}
+} // namespace
namespace {
/// A wrapper pass for the legacy pass manager to use with the above custom AA
@@ -126,14 +126,14 @@ class TestCustomAAWrapperPass : public ImmutablePass {
TestCustomAAResult &getResult() { return *Result; }
const TestCustomAAResult &getResult() const { return *Result; }
};
-}
+} // namespace
char TestCustomAAWrapperPass::ID = 0;
INITIALIZE_PASS_BEGIN(TestCustomAAWrapperPass, "test-custom-aa",
- "Test Custom AA Wrapper Pass", false, true)
+ "Test Custom AA Wrapper Pass", false, true)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(TestCustomAAWrapperPass, "test-custom-aa",
- "Test Custom AA Wrapper Pass", false, true)
+ "Test Custom AA Wrapper Pass", false, true)
namespace {
@@ -246,7 +246,8 @@ TEST_F(AliasAnalysisTest, BatchAAPhiCycles) {
%s2 = select i1 %c, i8* %a2, i8* %a1
br label %loop
}
- )", Err, C);
+ )",
+ Err, C);
Function *F = M->getFunction("f");
Instruction *Phi = getInstructionByName(*F, "phi");
@@ -291,7 +292,8 @@ TEST_F(AliasAnalysisTest, BatchAAPhiAssumption) {
%b.next = getelementptr i8, i8* %b, i64 1
br label %loop
}
- )", Err, C);
+ )",
+ Err, C);
Function *F = M->getFunction("f");
Instruction *A = getInstructionByName(*F, "a");
@@ -425,4 +427,79 @@ TEST_F(AAPassInfraTest, injectExternalAA) {
EXPECT_TRUE(IsCustomAAQueried);
}
-} // end anonymous namspace
+// Test that a monotonic atomic load is treated as ModRef ONLY against a
+// MustAlias location, and as Ref against a MayAlias location.
+TEST_F(AliasAnalysisTest, MonotonicAtomicLoadIsModRefOnlyForMustAlias) {
+ LLVMContext C;
+ SMDiagnostic Err;
+ std::unique_ptr<Module> M = parseAssemblyString(R"(
+ define void @atomic_load_modref_test(i8* %p1, i8* %p2) {
+ entry:
+ %val = load atomic i8, i8* %p1 monotonic, align 1
+ ret void
+ }
+ )",
+ Err, C);
+
+ ASSERT_TRUE(M);
+
+ Function *F = M->getFunction("atomic_load_modref_test");
+ const Instruction *AtomicLoad = &F->getEntryBlock().front();
+ const Value *Ptr1 = F->getArg(0);
+ const Value *Ptr2 = F->getArg(1);
+
+ MemoryLocation Ptr1Loc = MemoryLocation(Ptr1, LocationSize::precise(1));
+ MemoryLocation Ptr2Loc = MemoryLocation(Ptr2, LocationSize::precise(1));
+
+ auto &AA = getAAResults(*F);
+
+ // 1. MustAlias Case: The atomic load on %p1 against a location on %p1.
+ // This should trigger the new logic and return ModRef.
+ EXPECT_EQ(ModRefInfo::ModRef, AA.getModRefInfo(AtomicLoad, Ptr1Loc));
+
+ // 2. MayAlias Case: The atomic load on %p1 against a location on %p2.
+ // This should NOT trigger the new logic and should fall back to returning
+ // Ref.
+ EXPECT_EQ(ModRefInfo::Ref, AA.getModRefInfo(AtomicLoad, Ptr2Loc));
+}
+
+// Test that a monotonic atomic store is no longer considered "strong" and
+// falls through to the alias checking logic, while stronger orderings do not.
+TEST_F(AliasAnalysisTest, MonotonicAtomicStoreAliasBehavior) {
+ LLVMContext C;
+ SMDiagnostic Err;
+ std::unique_ptr<Module> M = parseAssemblyString(R"(
+ define void @atomic_store_test(i8* noalias %p1, i8* noalias %p2) {
+ entry:
+ store atomic i8 0, i8* %p1 monotonic, align 1
+ store atomic i8 0, i8* %p1 release, align 1
+ ret void
+ }
+ )",
+ Err, C);
+
+ ASSERT_TRUE(M);
+
+ Function *F = M->getFunction("atomic_store_test");
+ auto It = F->getEntryBlock().begin();
+ const Instruction *MonotonicStore = &*It++;
+ const Instruction *ReleaseStore = &*It;
+
+ const Value *Ptr2 = F->getArg(1);
+ MemoryLocation Ptr2Loc = MemoryLocation(Ptr2, LocationSize::precise(1));
+
+ auto &AA = getAAResults(*F);
+
+ // 1. Test the Monotonic store.
+ // With the change, the Monotonic store should go through the
+ // isStrongerThan() check. Since %p1 and %p2 are noalias, the alias check
+ // should return NoAlias, resulting in NoModRef.
+ EXPECT_EQ(ModRefInfo::NoModRef, AA.getModRefInfo(MonotonicStore, Ptr2Loc));
+
+  // 2. Test the Release (stronger) store.
+  // The release atomic store should be caught by the isStrongerThan() check
+  // and return ModRef without performing an alias check.
+ EXPECT_EQ(ModRefInfo::ModRef, AA.getModRefInfo(ReleaseStore, Ptr2Loc));
+}
+
+} // namespace
More information about the llvm-commits
mailing list