[llvm] Check identical alignment for atomic instructions (PR #155349)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 25 20:35:53 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-llvm-ir
Author: Ellis Hoag (ellishg)
<details>
<summary>Changes</summary>
I noticed that `hasSameSpecialState()` checks alignment for `load`/`store` instructions, but not for `cmpxchg` or `atomicrmw`, which I assume is a bug. It looks like alignment for these instructions was added in https://github.com/llvm/llvm-project/commit/74c723757e69fbe7d85e42527d07b728113699ae.
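For illustration, here is a minimal standalone sketch (not part of the PR, and assuming the patch is applied) of the behavior this changes. It only uses existing public API: `Instruction::isIdenticalTo()` routes through `hasSameSpecialState()` with alignment checking enabled, so two `atomicrmw` instructions that differ only in alignment should no longer compare as identical.

```cpp
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/SourceMgr.h"
#include <cassert>

using namespace llvm;

int main() {
  LLVMContext Ctx;
  SMDiagnostic Err;
  // Two atomicrmw instructions that are identical except for their alignment.
  std::unique_ptr<Module> M = parseAssemblyString(R"(
    define void @f(ptr %p) {
      %a = atomicrmw add ptr %p, i32 1 acquire, align 4
      %b = atomicrmw add ptr %p, i32 1 acquire, align 8
      ret void
    })",
                                                  Err, Ctx);
  assert(M && "failed to parse module");

  BasicBlock &BB = M->getFunction("f")->getEntryBlock();
  auto It = BB.begin();
  Instruction &A = *It++;
  Instruction &B = *It;

  // isIdenticalTo() calls hasSameSpecialState() with IgnoreAlignment=false.
  // With this patch, the differing alignments are compared, so the two
  // atomicrmw instructions are no longer reported as identical.
  assert(!A.isIdenticalTo(&B));
  return 0;
}
```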
---
Full diff: https://github.com/llvm/llvm-project/pull/155349.diff
2 Files Affected:
- (modified) llvm/lib/IR/Instruction.cpp (+5-1)
- (modified) llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp (+159)
``````````diff
diff --git a/llvm/lib/IR/Instruction.cpp b/llvm/lib/IR/Instruction.cpp
index 5e87b5ff941ad..6010e2a99cf01 100644
--- a/llvm/lib/IR/Instruction.cpp
+++ b/llvm/lib/IR/Instruction.cpp
@@ -864,7 +864,7 @@ const char *Instruction::getOpcodeName(unsigned OpCode) {
bool Instruction::hasSameSpecialState(const Instruction *I2,
bool IgnoreAlignment,
bool IntersectAttrs) const {
- auto I1 = this;
+ const auto *I1 = this;
assert(I1->getOpcode() == I2->getOpcode() &&
"Can not compare special state of different instructions");
@@ -917,6 +917,8 @@ bool Instruction::hasSameSpecialState(const Instruction *I2,
FI->getSyncScopeID() == cast<FenceInst>(I2)->getSyncScopeID();
if (const AtomicCmpXchgInst *CXI = dyn_cast<AtomicCmpXchgInst>(I1))
return CXI->isVolatile() == cast<AtomicCmpXchgInst>(I2)->isVolatile() &&
+ (CXI->getAlign() == cast<AtomicCmpXchgInst>(I2)->getAlign() ||
+ IgnoreAlignment) &&
CXI->isWeak() == cast<AtomicCmpXchgInst>(I2)->isWeak() &&
CXI->getSuccessOrdering() ==
cast<AtomicCmpXchgInst>(I2)->getSuccessOrdering() &&
@@ -927,6 +929,8 @@ bool Instruction::hasSameSpecialState(const Instruction *I2,
if (const AtomicRMWInst *RMWI = dyn_cast<AtomicRMWInst>(I1))
return RMWI->getOperation() == cast<AtomicRMWInst>(I2)->getOperation() &&
RMWI->isVolatile() == cast<AtomicRMWInst>(I2)->isVolatile() &&
+ (RMWI->getAlign() == cast<AtomicRMWInst>(I2)->getAlign() ||
+ IgnoreAlignment) &&
RMWI->getOrdering() == cast<AtomicRMWInst>(I2)->getOrdering() &&
RMWI->getSyncScopeID() == cast<AtomicRMWInst>(I2)->getSyncScopeID();
if (const ShuffleVectorInst *SVI = dyn_cast<ShuffleVectorInst>(I1))
diff --git a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
index 03009d53d63f4..067fec85e182b 100644
--- a/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
+++ b/llvm/unittests/Analysis/IRSimilarityIdentifierTest.cpp
@@ -19,11 +19,14 @@
#include "llvm/Support/Allocator.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/SourceMgr.h"
+#include "gmock/gmock.h"
#include "gtest/gtest.h"
using namespace llvm;
using namespace IRSimilarity;
+using testing::SizeIs;
+
static std::unique_ptr<Module> makeLLVMModule(LLVMContext &Context,
StringRef ModuleStr) {
SMDiagnostic Err;
@@ -730,6 +733,162 @@ TEST(IRInstructionMapper, StoreDifferentAtomic) {
ASSERT_TRUE(UnsignedVec[0] != UnsignedVec[1]);
}
+// Checks that atomicrmw instructions with different types are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentType) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i64* %b) {
+ bb0:
+ %1 = atomicrmw add i32* %a, i32 1 acquire
+ %2 = atomicrmw add i64* %b, i64 1 acquire
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that atomicrmw instructions with different alignments are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentAlign) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i32* %b) {
+ bb0:
+ %1 = atomicrmw add i32* %a, i32 1 acquire, align 4
+ %2 = atomicrmw add i32* %b, i32 1 acquire, align 8
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that atomicrmw instructions with different volatile settings are
+// mapped to different unsigned integers.
+TEST(IRInstructionMapper, AtomicRMWDifferentVolatile) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i32* %b) {
+ bb0:
+ %1 = atomicrmw volatile add i32* %a, i32 1 acquire
+ %2 = atomicrmw add i32* %b, i32 1 acquire
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg instructions with different types are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentType) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i64* %b) {
+ bb0:
+ %1 = cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic
+ %2 = cmpxchg i64* %b, i64 0, i64 1 monotonic monotonic
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg instructions with different alignments are mapped to
+// different unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentAlign) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i32* %b) {
+ bb0:
+ %1 = cmpxchg i32* %a, i32 0, i32 1 monotonic monotonic, align 4
+ %2 = cmpxchg i32* %b, i32 0, i32 1 monotonic monotonic, align 8
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
+// Checks that cmpxchg instructions with different volatile settings are
+// mapped to different unsigned integers.
+TEST(IRInstructionMapper, AtomicCmpXchgDifferentVolatile) {
+ StringRef ModuleString = R"(
+ define i32 @f(i32* %a, i32* %b) {
+ bb0:
+ %1 = cmpxchg volatile i32* %a, i32 0, i32 1 monotonic monotonic
+ %2 = cmpxchg i32* %b, i32 0, i32 1 monotonic monotonic
+ ret i32 0
+ })";
+ LLVMContext Context;
+ std::unique_ptr<Module> M = makeLLVMModule(Context, ModuleString);
+
+ std::vector<IRInstructionData *> InstrList;
+ std::vector<unsigned> UnsignedVec;
+
+ SpecificBumpPtrAllocator<IRInstructionData> InstDataAllocator;
+ SpecificBumpPtrAllocator<IRInstructionDataList> IDLAllocator;
+ IRInstructionMapper Mapper(&InstDataAllocator, &IDLAllocator);
+ getVectors(*M, Mapper, InstrList, UnsignedVec);
+
+ ASSERT_EQ(InstrList.size(), UnsignedVec.size());
+ ASSERT_THAT(UnsignedVec, SizeIs(3));
+ EXPECT_NE(UnsignedVec[0], UnsignedVec[1]);
+}
+
// Checks that the branch is mapped to legal when the option is set.
TEST(IRInstructionMapper, BranchLegal) {
StringRef ModuleString = R"(
``````````
</details>
https://github.com/llvm/llvm-project/pull/155349