[llvm] 8b9b0fd - [CodeGen][TLI] Allow targets to custom expand atomic load/stores (#154708)
via llvm-commits
llvm-commits at lists.llvm.org
Thu Aug 28 00:58:13 PDT 2025
Author: Pierre van Houtryve
Date: 2025-08-28T09:58:10+02:00
New Revision: 8b9b0fdedf8cf3d6ea4ed65ed14250361d19428e
URL: https://github.com/llvm/llvm-project/commit/8b9b0fdedf8cf3d6ea4ed65ed14250361d19428e
DIFF: https://github.com/llvm/llvm-project/commit/8b9b0fdedf8cf3d6ea4ed65ed14250361d19428e.diff
LOG: [CodeGen][TLI] Allow targets to custom expand atomic load/stores (#154708)
Loads didn't have an `Expand` option in `AtomicExpandPass`. Stores had
`Expand`, but it didn't defer to TLI and instead performed a fixed
expansion directly. Add a `CustomExpand` option that always maps to the
corresponding TLI hook, for every instruction kind. `Expand` now refers
to a generic, target-independent expansion.
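
For context, here is a minimal sketch of how a backend opts into the new
path. The two overridden hooks are the interface this patch touches;
`MyTargetLowering`, the 64-bit size cutoff, and the
`__mytarget_atomic_store_128` runtime helper are made-up illustrations,
not part of the patch:

// Hypothetical backend; only the two overridden hooks are real
// interface from this patch.
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"

using namespace llvm;

class MyTargetLowering : public TargetLowering {
public:
  explicit MyTargetLowering(const TargetMachine &TM) : TargetLowering(TM) {}

  AtomicExpansionKind
  shouldExpandAtomicStoreInIR(StoreInst *SI) const override {
    // Ask AtomicExpandPass to call back into emitExpandAtomicStore()
    // rather than applying its generic atomicrmw-xchg expansion
    // (which is what plain Expand now selects for stores).
    uint64_t Size =
        SI->getValueOperand()->getType()->getPrimitiveSizeInBits();
    return Size > 64 ? AtomicExpansionKind::CustomExpand
                     : AtomicExpansionKind::None;
  }

  void emitExpandAtomicStore(StoreInst *SI) const override {
    // Target-specific IR rewrite; here, a call to a made-up runtime
    // helper. Returning CustomExpand without overriding this hook
    // hits the llvm_unreachable in the default implementation.
    IRBuilder<> Builder(SI);
    Module *M = SI->getModule();
    FunctionCallee Helper = M->getOrInsertFunction(
        "__mytarget_atomic_store_128", Builder.getVoidTy(),
        SI->getPointerOperand()->getType(),
        SI->getValueOperand()->getType());
    Builder.CreateCall(Helper,
                       {SI->getPointerOperand(), SI->getValueOperand()});
    SI->eraseFromParent();
  }
};

This mirrors the call sites updated below: AMDGPU and LoongArch
previously returned `Expand` to reach the TLI hooks and now return
`CustomExpand` instead.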
Added:
Modified:
llvm/include/llvm/CodeGen/TargetLowering.h
llvm/lib/CodeGen/AtomicExpandPass.cpp
llvm/lib/Target/AMDGPU/SIISelLowering.cpp
llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
Removed:
################################################################################
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index cb49b6dabd28a..438b6ff55c85f 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -268,6 +268,7 @@ class LLVM_ABI TargetLoweringBase {
CmpArithIntrinsic, // Use a target-specific intrinsic for special compare
// operations; used by X86.
Expand, // Generic expansion in terms of other atomic operations.
+ CustomExpand, // Custom target-specific expansion using TLI hooks.
// Rewrite to a non-atomic form for use in a known non-preemptible
// environment.
@@ -2275,6 +2276,18 @@ class LLVM_ABI TargetLoweringBase {
"Generic atomicrmw expansion unimplemented on this target");
}
+ /// Perform an atomic store using a target-specific method.
+ virtual void emitExpandAtomicStore(StoreInst *SI) const {
+ llvm_unreachable(
+ "Generic atomic store expansion unimplemented on this target");
+ }
+
+ /// Perform an atomic load using a target-specific method.
+ virtual void emitExpandAtomicLoad(LoadInst *LI) const {
+ llvm_unreachable(
+ "Generic atomic load expansion unimplemented on this target");
+ }
+
/// Perform a cmpxchg expansion using a target-specific method.
virtual void emitExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) const {
llvm_unreachable("Generic cmpxchg expansion unimplemented on this target");
@@ -2379,8 +2392,8 @@ class LLVM_ABI TargetLoweringBase {
}
/// Returns how the given (atomic) store should be expanded by the IR-level
- /// AtomicExpand pass into. For instance AtomicExpansionKind::Expand will try
- /// to use an atomicrmw xchg.
+ /// AtomicExpand pass into. For instance AtomicExpansionKind::CustomExpand
+ /// will try to use an atomicrmw xchg.
virtual AtomicExpansionKind shouldExpandAtomicStoreInIR(StoreInst *SI) const {
return AtomicExpansionKind::None;
}
diff --git a/llvm/lib/CodeGen/AtomicExpandPass.cpp b/llvm/lib/CodeGen/AtomicExpandPass.cpp
index 278dd6560e736..601185d0d3cb2 100644
--- a/llvm/lib/CodeGen/AtomicExpandPass.cpp
+++ b/llvm/lib/CodeGen/AtomicExpandPass.cpp
@@ -84,7 +84,7 @@ class AtomicExpandImpl {
bool expandAtomicLoadToCmpXchg(LoadInst *LI);
StoreInst *convertAtomicStoreToIntegerType(StoreInst *SI);
bool tryExpandAtomicStore(StoreInst *SI);
- void expandAtomicStore(StoreInst *SI);
+ void expandAtomicStoreToXChg(StoreInst *SI);
bool tryExpandAtomicRMW(AtomicRMWInst *AI);
AtomicRMWInst *convertAtomicXchgToIntegerType(AtomicRMWInst *RMWI);
Value *
@@ -537,6 +537,9 @@ bool AtomicExpandImpl::tryExpandAtomicLoad(LoadInst *LI) {
case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
LI->setAtomic(AtomicOrdering::NotAtomic);
return true;
+ case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
+ TLI->emitExpandAtomicLoad(LI);
+ return true;
default:
llvm_unreachable("Unhandled case in tryExpandAtomicLoad");
}
@@ -546,8 +549,11 @@ bool AtomicExpandImpl::tryExpandAtomicStore(StoreInst *SI) {
switch (TLI->shouldExpandAtomicStoreInIR(SI)) {
case TargetLoweringBase::AtomicExpansionKind::None:
return false;
+ case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
+ TLI->emitExpandAtomicStore(SI);
+ return true;
case TargetLoweringBase::AtomicExpansionKind::Expand:
- expandAtomicStore(SI);
+ expandAtomicStoreToXChg(SI);
return true;
case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
SI->setAtomic(AtomicOrdering::NotAtomic);
@@ -620,7 +626,7 @@ StoreInst *AtomicExpandImpl::convertAtomicStoreToIntegerType(StoreInst *SI) {
return NewSI;
}
-void AtomicExpandImpl::expandAtomicStore(StoreInst *SI) {
+void AtomicExpandImpl::expandAtomicStoreToXChg(StoreInst *SI) {
// This function is only called on atomic stores that are too large to be
// atomic if implemented as a native store. So we replace them by an
// atomic swap, that can be implemented for example as a ldrex/strex on ARM
@@ -741,7 +747,7 @@ bool AtomicExpandImpl::tryExpandAtomicRMW(AtomicRMWInst *AI) {
}
case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
return lowerAtomicRMWInst(AI);
- case TargetLoweringBase::AtomicExpansionKind::Expand:
+ case TargetLoweringBase::AtomicExpansionKind::CustomExpand:
TLI->emitExpandAtomicRMW(AI);
return true;
default:
@@ -1695,7 +1701,7 @@ bool AtomicExpandImpl::tryExpandAtomicCmpXchg(AtomicCmpXchgInst *CI) {
return true;
case TargetLoweringBase::AtomicExpansionKind::NotAtomic:
return lowerAtomicCmpXchgInst(CI);
- case TargetLoweringBase::AtomicExpansionKind::Expand: {
+ case TargetLoweringBase::AtomicExpansionKind::CustomExpand: {
TLI->emitExpandAtomicCmpXchg(CI);
return true;
}
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index 66c1dfc71c2f5..080e30d428660 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -17823,7 +17823,7 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
if (AS == AMDGPUAS::FLAT_ADDRESS &&
DL.getTypeSizeInBits(RMW->getType()) == 64 &&
flatInstrMayAccessPrivate(RMW))
- return AtomicExpansionKind::Expand;
+ return AtomicExpansionKind::CustomExpand;
auto ReportUnsafeHWInst = [=](TargetLowering::AtomicExpansionKind Kind) {
OptimizationRemarkEmitter ORE(RMW->getFunction());
@@ -17898,7 +17898,7 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
// does. InstCombine transforms these with 0 to or, so undo that.
if (Constant *ConstVal = dyn_cast<Constant>(RMW->getValOperand());
ConstVal && ConstVal->isNullValue())
- return AtomicExpansionKind::Expand;
+ return AtomicExpansionKind::CustomExpand;
}
// If the allocation could be in remote, fine-grained memory, the rmw
@@ -18027,9 +18027,9 @@ SITargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *RMW) const {
// fadd.
if (Subtarget->hasLDSFPAtomicAddF32()) {
if (RMW->use_empty() && Subtarget->hasAtomicFaddNoRtnInsts())
- return AtomicExpansionKind::Expand;
+ return AtomicExpansionKind::CustomExpand;
if (!RMW->use_empty() && Subtarget->hasAtomicFaddRtnInsts())
- return AtomicExpansionKind::Expand;
+ return AtomicExpansionKind::CustomExpand;
}
}
}
@@ -18109,7 +18109,7 @@ SITargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CmpX) const {
// If a 64-bit flat atomic may alias private, we need to avoid using the
// atomic in the private case.
- return DL.getTypeSizeInBits(ValTy) == 64 ? AtomicExpansionKind::Expand
+ return DL.getTypeSizeInBits(ValTy) == 64 ? AtomicExpansionKind::CustomExpand
: AtomicExpansionKind::None;
}
diff --git a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
index 948362f57904f..664abffc6bff7 100644
--- a/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
+++ b/llvm/lib/Target/LoongArch/LoongArchISelLowering.cpp
@@ -7949,7 +7949,7 @@ LoongArchTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
if (Size < 32 && (AI->getOperation() == AtomicRMWInst::And ||
AI->getOperation() == AtomicRMWInst::Or ||
AI->getOperation() == AtomicRMWInst::Xor))
- return AtomicExpansionKind::Expand;
+ return AtomicExpansionKind::CustomExpand;
if (AI->getOperation() == AtomicRMWInst::Nand || Size < 32)
return AtomicExpansionKind::CmpXChg;
}