[llvm] [IR] Check identical alignment for atomic instructions (PR #155349)
Ellis Hoag via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 26 06:52:54 PDT 2025
https://github.com/ellishg updated https://github.com/llvm/llvm-project/pull/155349
>From e687f1f0d2085a093569d2b2eaa7cf24c03a8924 Mon Sep 17 00:00:00 2001
From: Ellis Hoag <ellishoag at meta.com>
Date: Mon, 25 Aug 2025 20:17:28 -0700
Subject: [PATCH 1/2] Check identical alignment for atomic instrs
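
This makes Instruction::hasSameSpecialState() treat two atomicrmw or cmpxchg
instructions with different alignments as having different special state
(unless IgnoreAlignment is set), matching what is already done for load and
store. As a minimal sketch of the user-visible effect (not part of the patch,
and assuming the usual C++ entry points parseAssemblyString() and
Instruction::isSameOperationAs()), two otherwise identical atomicrmw
instructions that differ only in alignment now compare as different
operations:

  #include "llvm/AsmParser/Parser.h"
  #include "llvm/IR/BasicBlock.h"
  #include "llvm/IR/Instruction.h"
  #include "llvm/IR/LLVMContext.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Support/SourceMgr.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;

  int main() {
    LLVMContext Ctx;
    SMDiagnostic Err;
    // Two atomicrmw instructions identical except for their alignment.
    std::unique_ptr<Module> M = parseAssemblyString(R"(
      define void @f(ptr %p) {
        %a = atomicrmw add ptr %p, i32 1 acquire, align 4
        %b = atomicrmw add ptr %p, i32 1 acquire, align 8
        ret void
      })", Err, Ctx);
    if (!M)
      return 1;
    BasicBlock &BB = M->getFunction("f")->getEntryBlock();
    auto It = BB.begin();
    Instruction &A = *It++;
    Instruction &B = *It;
    // Before this patch the differing alignments were ignored for atomicrmw
    // and cmpxchg; with it, isSameOperationAs() (which calls
    // hasSameSpecialState() with IgnoreAlignment=false by default) reports
    // them as different operations.
    outs() << "same operation: " << A.isSameOperationAs(&B) << "\n"; // expect 0
    return 0;
  }
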
---
llvm/lib/IR/Instruction.cpp | 6 +-
.../Analysis/IRSimilarityIdentifierTest.cpp | 159 ++++++++++++++++++
2 files changed, 164 insertions(+), 1 deletion(-)
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index 5e87b5ff941ad..6010e2a99cf01 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -864,7 +864,7 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
bool Instruction::hasSameSpecialState(const Instruction *I2,
bool IgnoreAlignment,
bool IntersectAttrs) const {
- auto I1 = this;
+ const auto *I1 = this;
assert(I1->getOpcode() == I2->getOpcode() &&
"Can not compare special state of different instructions");
@@ -917,6 +917,8 @@ bool Instruction::hasSameSpecialState(const Instruction *I2,
FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
+ (CXI->getAlign() == cast<AtomicCmpXchgInst>(I2)->getAlign() ||
+ IgnoreAlignment) &&
CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
CXI->getSuccessOrdering() ==
cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
@@ -927,6 +929,8 @@ bool Instruction::hasSameSpecialState(const Instruction *I2,
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
+ (RMWI->getAlign() == cast<AtomicRMWInst>(I2)->getAlign() ||
+ IgnoreAlignment) &&
RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
diff --git a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
index 03009d53d63f4..067fec85e182b 100644
--- a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
+++ b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
@@ -19,11 +19,14 @@
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SourceMgr.h"
+#include "gmock/gmock.h"
#include "gtest/gtest.h"
using namespace llvm;
using namespace IRSimilarity;
+using testing::SizeIs;
+
static std::unique_ptr<Module> makeLLVMModule(LLVMContext &Context,
StringRef ModuleStr) {
SMDiagnostic Err;
@@ -730,6 +733,162 @@ TEST(IRInstructionMapper, StoreDifferentAtomic) {
ASSERT_TRUE(UnsignedVec[0] != UnsignedVec[1]);
}
+// Checks that atomicrmw instructions with different types are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentType) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i64* %b) {
+ bb0:
+ %1 = atomicrmw add i32* %a, i32 1 acquire
+ %2 = atomicrmw add i64* %b, i64 1 acquire
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that atomicrmw with different alignments are mapped to different
+// unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentAlign) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i32* %b) {
+ bb0:
+ %1 = atomicrmw add i32* %a, i32 1 acquire, align 4
+ %2 = atomicrmw add i32* %b, i32 1 acquire, align 8
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that atomicrmw with different volatile settings are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentVolatile) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i32* %b) {
+ bb0:
+ %1 = atomicrmw volatile add i32* %a, i32 1 acquire
+ %2 = atomicrmw add i32* %b, i32 1 acquire
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg instructions with different types are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentType) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i64* %b) {
+ bb0:
+ %1 = cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic
+ %2 = cmpxchg i64* %b, i64 0, i64 1 monotonic monotonic
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg with different alignments are mapped to different
+// unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentAlign) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i32* %b) {
+ bb0:
+ %1 = cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic, align 4
+ %2 = cmpxchg i32* %b, i32 0, i32 1 monotonic monotonic, align 8
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg with different volatile settings are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentVolatile) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i32* %b) {
+ bb0:
+ %1 = cmpxchg volatile i32* %a, i32 0, i32 1 monotonic monotonic
+ %2 = cmpxchg i32* %b, i32 0, i32 1 monotonic monotonic
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
// Checks that the branch is mapped to legal when the option is set.
TEST(IRInstructionMapper, BranchLegal) {
StringRef ModuleString = R"(
>From 77440a90f339ab2b0563198dcfc57c3712d39fa7 Mon Sep 17 00:00:00 2001
From: Ellis Hoag <ellishoag at meta.com>
Date: Tue, 26 Aug 2025 06:52:42 -0700
Subject: [PATCH 2/2] use ptr
---
.../Analysis/IRSimilarityIdentifierTest.cpp | 36 +++++++++----------
1 file changed, 18 insertions(+), 18 deletions(-)
diff --git a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
index 067fec85e182b..aa26a8e3469b4 100644
--- a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
+++ b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
@@ -737,10 +737,10 @@ TEST(IRInstructionMapper, StoreDifferentAtomic) {
// different unsigned integers.
TEST(IRInstructionMapper, AtomicRMWDifferentType) {
StringRef ModuleString = R"(
- define i32 @f(i32* %a, i64* %b) {
+ define i32 @f(ptr %a, ptr %b) {
bb0:
- %1 = atomicrmw add i32* %a, i32 1 acquire
- %2 = atomicrmw add i64* %b, i64 1 acquire
+ %1 = atomicrmw add ptr %a, i32 1 acquire
+ %2 = atomicrmw add ptr %b, i64 1 acquire
ret i32 0
})";
LLVMContext Context;
@@ -763,10 +763,10 @@ TEST(IRInstructionMapper, AtomicRMWDifferentType) {
// unsigned integers.
TEST(IRInstructionMapper, AtomicRMWDifferentAlign) {
StringRef ModuleString = R"(
- define i32 @f(i32* %a, i32* %b) {
+ define i32 @f(ptr %a, ptr %b) {
bb0:
- %1 = atomicrmw add i32* %a, i32 1 acquire, align 4
- %2 = atomicrmw add i32* %b, i32 1 acquire, align 8
+ %1 = atomicrmw add ptr %a, i32 1 acquire, align 4
+ %2 = atomicrmw add ptr %b, i32 1 acquire, align 8
ret i32 0
})";
LLVMContext Context;
@@ -789,10 +789,10 @@ TEST(IRInstructionMapper, AtomicRMWDifferentAlign) {
// different unsigned integers.
TEST(IRInstructionMapper, AtomicRMWDifferentVolatile) {
StringRef ModuleString = R"(
- define i32 @f(i32* %a, i32* %b) {
+ define i32 @f(ptr %a, ptr %b) {
bb0:
- %1 = atomicrmw volatile add i32* %a, i32 1 acquire
- %2 = atomicrmw add i32* %b, i32 1 acquire
+ %1 = atomicrmw volatile add ptr %a, i32 1 acquire
+ %2 = atomicrmw add ptr %b, i32 1 acquire
ret i32 0
})";
LLVMContext Context;
@@ -815,10 +815,10 @@ TEST(IRInstructionMapper, AtomicRMWDifferentVolatile) {
// different unsigned integers.
TEST(IRInstructionMapper, AtomicCmpXchgDifferentType) {
StringRef ModuleString = R"(
- define i32 @f(i32* %a, i64* %b) {
+ define i32 @f(ptr %a, ptr %b) {
bb0:
- %1 = cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic
- %2 = cmpxchg i64* %b, i64 0, i64 1 monotonic monotonic
+ %1 = cmpxchg ptr %a, i32 0, i32 1 monotonic monotonic
+ %2 = cmpxchg ptr %b, i64 0, i64 1 monotonic monotonic
ret i32 0
})";
LLVMContext Context;
@@ -841,10 +841,10 @@ TEST(IRInstructionMapper, AtomicCmpXchgDifferentType) {
// unsigned integers.
TEST(IRInstructionMapper, AtomicCmpXchgDifferentAlign) {
StringRef ModuleString = R"(
- define i32 @f(i32* %a, i32* %b) {
+ define i32 @f(ptr %a, ptr %b) {
bb0:
- %1 = cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic, align 4
- %2 = cmpxchg i32* %b, i32 0, i32 1 monotonic monotonic, align 8
+ %1 = cmpxchg ptr %a, i32 0, i32 1 monotonic monotonic, align 4
+ %2 = cmpxchg ptr %b, i32 0, i32 1 monotonic monotonic, align 8
ret i32 0
})";
LLVMContext Context;
@@ -867,10 +867,10 @@ TEST(IRInstructionMapper, AtomicCmpXchgDifferentAlign) {
// different unsigned integers.
TEST(IRInstructionMapper, AtomicCmpXchgDifferentVolatile) {
StringRef ModuleString = R"(
- define i32 @f(i32* %a, i32* %b) {
+ define i32 @f(ptr %a, ptr %b) {
bb0:
- %1 = cmpxchg volatile i32* %a, i32 0, i32 1 monotonic monotonic
- %2 = cmpxchg i32* %b, i32 0, i32 1 monotonic monotonic
+ %1 = cmpxchg volatile ptr %a, i32 0, i32 1 monotonic monotonic
+ %2 = cmpxchg ptr %b, i32 0, i32 1 monotonic monotonic
ret i32 0
})";
LLVMContext Context;