[llvm] [PowerPC] enable AtomicExpandImpl::expandAtomicCmpXchg for powerpc (PR #142395)
via llvm-commits
llvm-commits at lists.llvm.org
Mon Jun 2 07:01:10 PDT 2025
github-actions[bot] wrote:
:warning: The C/C++ code formatter, clang-format, found issues in your code. :warning:
<details>
<summary>
You can test this locally with the following command:
</summary>
``````````bash
git-clang-format --diff HEAD~1 HEAD --extensions h,cpp -- llvm/include/llvm/CodeGen/TargetLowering.h llvm/lib/Target/PowerPC/PPCISelLowering.cpp llvm/lib/Target/PowerPC/PPCISelLowering.h
``````````
</details>
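If you want the fixes applied rather than just reported, a typical local workflow (an assumption here: the unformatted change is the tip commit of your branch and the working tree is otherwise clean) is to let git-clang-format rewrite the changed lines in place and then fold the result into the commit:

``````````bash
# Reformat only the lines changed since HEAD~1, modifying the files in place.
git clang-format HEAD~1
# Fold the formatting fixes back into the existing commit.
git commit -a --amend --no-edit
``````````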
<details>
<summary>
View the diff from clang-format here.
</summary>
``````````diff
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 5bea6da36..a49fc6c41 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -254,20 +254,20 @@ public:
/// support for these atomic instructions, and also have different options
/// w.r.t. what they should expand to.
enum class AtomicExpansionKind {
- None, // Don't expand the instruction.
- CastToInteger, // Cast the atomic instruction to another type, e.g. from
- // floating-point to integer type.
+ None, // Don't expand the instruction.
+ CastToInteger, // Cast the atomic instruction to another type, e.g. from
+ // floating-point to integer type.
LLSC, // Expand the instruction into loadlinked/storeconditional; used
// by ARM/AArch64/PowerPC.
LLOnly, // Expand the (load) instruction into just a load-linked, which has
// greater atomic guarantees than a normal load.
CmpXChg, // Expand the instruction into cmpxchg; used by at least X86.
- MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
- BitTestIntrinsic, // Use a target-specific intrinsic for special bit
- // operations; used by X86.
- CmpArithIntrinsic,// Use a target-specific intrinsic for special compare
- // operations; used by X86.
- Expand, // Generic expansion in terms of other atomic operations.
+ MaskedIntrinsic, // Use a target-specific intrinsic for the LL/SC loop.
+ BitTestIntrinsic, // Use a target-specific intrinsic for special bit
+ // operations; used by X86.
+ CmpArithIntrinsic, // Use a target-specific intrinsic for special compare
+ // operations; used by X86.
+ Expand, // Generic expansion in terms of other atomic operations.
// Rewrite to a non-atomic form for use in a known non-preemptible
// environment.
diff --git a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
index 287145e3d..930554769 100644
--- a/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
+++ b/llvm/lib/Target/PowerPC/PPCISelLowering.cpp
@@ -1475,7 +1475,7 @@ PPCTargetLowering::PPCTargetLowering(const PPCTargetMachine &TM,
setMinimumJumpTableEntries(PPCMinimumJumpTableEntries);
setMinFunctionAlignment(Align(4));
- if(Subtarget.hasPartwordAtomics())
+ if (Subtarget.hasPartwordAtomics())
setMinCmpXchgSizeInBits(8);
else
setMinCmpXchgSizeInBits(32);
@@ -12738,12 +12738,13 @@ Value *PPCTargetLowering::emitStoreConditional(IRBuilderBase &Builder,
break;
}
- if(SZ ==8 || SZ==16)
- Val = Builder.CreateZExt(Val, Builder.getIntNTy(32));;
+ if (SZ == 8 || SZ == 16)
+ Val = Builder.CreateZExt(Val, Builder.getIntNTy(32));
+ ;
Value *Call = Builder.CreateIntrinsic(IntID, {Addr, Val},
/*FMFSource=*/nullptr, "stcx");
- Value *Not = Builder.CreateXor(Call,Builder.getInt32(1));
+ Value *Not = Builder.CreateXor(Call, Builder.getInt32(1));
return Not;
}
@@ -19709,7 +19710,7 @@ PPCTargetLowering::shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const {
if (shouldInlineQuadwordAtomics() && Size == 128)
return AtomicExpansionKind::MaskedIntrinsic;
return AtomicExpansionKind::LLSC;
- //return TargetLowering::shouldExpandAtomicCmpXchgInIR(AI);
+ // return TargetLowering::shouldExpandAtomicCmpXchgInIR(AI);
}
static Intrinsic::ID
``````````
</details>
https://github.com/llvm/llvm-project/pull/142395
More information about the llvm-commits mailing list