[llvm] [CodeGen][TLI] Allow targets to custom expand atomic load/stores (PR #154708)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 21 02:34:13 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-backend-x86
Author: Pierre van Houtryve (Pierre-vh)
Changes:
Atomic loads had no `Expand` option in `AtomicExpandPass`, and for stores `Expand` did not defer to TLI; the pass performed a fixed expansion itself.
Move that old store behavior to a new `XChg` expansion kind and make `Expand` behave as it does for every other atomic instruction: defer to the target through the new `emitExpandAtomicLoad` / `emitExpandAtomicStore` TLI hooks.
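As a minimal sketch of how a backend would use this (the target class, the size threshold, and the runtime helper name below are hypothetical, not taken from the patch), a target can now return `Expand` from `shouldExpandAtomicStoreInIR` and take over the rewrite in `emitExpandAtomicStore`:

```cpp
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

TargetLowering::AtomicExpansionKind
MyTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
  // Hand stores wider than 64 bits to emitExpandAtomicStore below; smaller
  // stores are natively atomic on this imaginary target.
  unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
  return Size > 64 ? AtomicExpansionKind::Expand : AtomicExpansionKind::None;
}

void MyTargetLowering::emitExpandAtomicStore(StoreInst *SI) const {
  // Rewrite the wide atomic store into a call to a made-up runtime helper
  // and erase the original instruction; AtomicExpandPass treats the store
  // as fully handled once this hook returns.
  IRBuilder<> Builder(SI);
  Module *M = SI->getModule();
  FunctionCallee Helper = M->getOrInsertFunction(
      "__mytarget_atomic_store_128", Builder.getVoidTy(), Builder.getPtrTy(),
      SI->getValueOperand()->getType());
  Builder.CreateCall(Helper, {SI->getPointerOperand(), SI->getValueOperand()});
  SI->eraseFromParent();
}
```

The same pattern applies to loads via `shouldExpandAtomicLoadInIR` and `emitExpandAtomicLoad`; previously `Expand` was not a valid answer for loads at all.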
---
Full diff: https://github.com/llvm/llvm-project/pull/154708.diff
6 Files Affected:
- (modified) llvm/include/llvm/CodeGen/TargetLowering.h (+15-1)
- (modified) llvm/lib/CodeGen/AtomicExpandPass.cpp (+9-3)
- (modified) llvm/lib/Target/AArch64/AArch64ISelLowering.cpp (+2-2)
- (modified) llvm/lib/Target/ARM/ARMISelLowering.cpp (+1-1)
- (modified) llvm/lib/Target/Hexagon/HexagonISelLowering.cpp (+1-1)
- (modified) llvm/lib/Target/X86/X86ISelLowering.cpp (+1-1)
``````````diff
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 4480ced637456..8c5f03ce526b4 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -262,7 +262,9 @@ class LLVM_ABI TargetLoweringBase {
LLOnly, // Expand the (load) instruction into just a load-linked, which has
// greater atomic guarantees than a normal load.
CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
- MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
+ MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
+ XChg, // Expand a store too large to be atomic into an xchg, then re-process
+       // it.
BitTestIntrinsic, // Use a target-specific intrinsic for special bit
// operations; used by X86.
CmpArithIntrinsic, // Use a target-specific intrinsic for special compare
@@ -2273,6 +2275,18 @@ class LLVM_ABI TargetLoweringBase {
"Generic atomicrmw expansion unimplemented on this target");
}
+ /// Perform an atomic store expansion using a target-specific method.
+ virtual void emitExpandAtomicStore(StoreInst *SI) const {
+ llvm_unreachable(
+ "Generic atomic store expansion unimplemented on this target");
+ }
+
+ /// Perform an atomic load expansion using a target-specific method.
+ virtual void emitExpandAtomicLoad(LoadInst *LI) const {
+ llvm_unreachable(
+ "Generic atomic load expansion unimplemented on this target");
+ }
+
/// Perform a cmpxchg expansion using a target-specific method.
virtual void emitExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) const {
llvm_unreachable("Generic cmpxchg expansion unimplemented on this target");
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index 278dd6560e736..b617b53385b55 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -84,7 +84,7 @@ class AtomicExpandImpl {
bool expandAtomicLoadToCmpXchg(LoadInst *LI);
StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
bool tryExpandAtomicStore(StoreInst *SI);
- void expandAtomicStore(StoreInst *SI);
+ void expandAtomicStoreToXChg(StoreInst *SI);
bool tryExpandAtomicRMW(AtomicRMWInst *AI);
AtomicRMWInst *convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI);
Value *
@@ -537,6 +537,9 @@ bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
LI->setAtomic(AtomicOrdering::NotAtomic);
return true;
+ case TargetLoweringBase::AtomicExpansionKind::Expand:
+ TLI->emitExpandAtomicLoad(LI);
+ return true;
default:
llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
}
@@ -547,7 +550,10 @@ bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
case TargetLoweringBase::AtomicExpansionKind::None:
return false;
case TargetLoweringBase::AtomicExpansionKind::Expand:
- expandAtomicStore(SI);
+ TLI->emitExpandAtomicStore(SI);
+ return true;
+ case TargetLoweringBase::AtomicExpansionKind::XChg:
+ expandAtomicStoreToXChg(SI);
return true;
case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
SI->setAtomic(AtomicOrdering::NotAtomic);
@@ -620,7 +626,7 @@ StoreInst *AtomicExpandImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
return NewSI;
}
-void AtomicExpandImpl::expandAtomicStore(StoreInst *SI) {
+void AtomicExpandImpl::expandAtomicStoreToXChg(StoreInst *SI) {
// This function is only called on atomic stores that are too large to be
// atomic if implemented as a native store. So we replace them by an
// atomic swap, that can be implemented for example as a ldrex/strex on ARM
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d168cc8d1bd06..b8bd726cd8e6c 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -28422,10 +28422,10 @@ AArch64TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
if (isOpSuitableForRCPC3(SI))
return AtomicExpansionKind::None;
if (isOpSuitableForLSE128(SI))
- return AtomicExpansionKind::Expand;
+ return AtomicExpansionKind::XChg;
if (isOpSuitableForLDPSTP(SI))
return AtomicExpansionKind::None;
- return AtomicExpansionKind::Expand;
+ return AtomicExpansionKind::XChg;
}
// Loads and stores less than 128-bits are already atomic; ones above that
diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index 830156359e9e8..dd5dba402173b 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -21236,7 +21236,7 @@ ARMTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
has64BitAtomicStore = Subtarget->hasV6Ops();
unsigned Size = SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
- return Size == 64 && has64BitAtomicStore ? AtomicExpansionKind::Expand
+ return Size == 64 && has64BitAtomicStore ? AtomicExpansionKind::XChg
: AtomicExpansionKind::None;
}
diff --git a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
index c54b67ccd8843..e44626868454a 100644
--- a/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
+++ b/llvm/lib/Target/Hexagon/HexagonISelLowering.cpp
@@ -3938,7 +3938,7 @@ TargetLowering::AtomicExpansionKind
HexagonTargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
// Do not expand loads and stores that don't exceed 64 bits.
return SI->getValueOperand()->getType()->getPrimitiveSizeInBits() > 64
- ? AtomicExpansionKind::Expand
+ ? AtomicExpansionKind::XChg
: AtomicExpansionKind::None;
}
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 19131fbd4102b..653b032039aa7 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -31723,7 +31723,7 @@ X86TargetLowering::shouldExpandAtomicStoreInIR(StoreInst *SI) const {
return AtomicExpansionKind::None;
}
- return needsCmpXchgNb(MemType) ? AtomicExpansionKind::Expand
+ return needsCmpXchgNb(MemType) ? AtomicExpansionKind::XChg
: AtomicExpansionKind::None;
}
``````````
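For context on the new `XChg` kind: the renamed `expandAtomicStoreToXChg` keeps the behavior the pass previously applied under `Expand`. A slightly simplified paraphrase (not a verbatim copy of the pass) of what that path does:

```cpp
void AtomicExpandImpl::expandAtomicStoreToXChg(StoreInst *SI) {
  // Stores selected for XChg are too wide to be natively atomic, so rewrite
  // them as an atomicrmw xchg whose result is unused, then hand the swap to
  // the usual atomicrmw expansion (LL/SC loop, cmpxchg loop, ...).
  IRBuilder<> Builder(SI);
  AtomicOrdering Ordering = SI->getOrdering();
  // An atomicrmw cannot be "unordered", so promote that case to monotonic.
  if (Ordering == AtomicOrdering::Unordered)
    Ordering = AtomicOrdering::Monotonic;
  AtomicRMWInst *AI = Builder.CreateAtomicRMW(
      AtomicRMWInst::Xchg, SI->getPointerOperand(), SI->getValueOperand(),
      SI->getAlign(), Ordering);
  SI->eraseFromParent();
  tryExpandAtomicRMW(AI);
}
```

Targets that relied on the old behavior (AArch64, ARM, Hexagon, X86 in this diff) therefore only switch their return value from `Expand` to `XChg`, so nothing changes for them.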
https://github.com/llvm/llvm-project/pull/154708