[llvm] 04c122b - [X86] Preserve !pcsections on atomic intrinsics
Marco Elver via llvm-commits
llvm-commits at lists.llvm.org
Wed Mar 29 01:55:24 PDT 2023
Author: Marco Elver
Date: 2023-03-29T10:27:24+02:00
New Revision: 04c122b1f13ed77a0643338bd26e8f19c22532a3
URL: https://github.com/llvm/llvm-project/commit/04c122b1f13ed77a0643338bd26e8f19c22532a3
DIFF: https://github.com/llvm/llvm-project/commit/04c122b1f13ed77a0643338bd26e8f19c22532a3.diff
LOG: [X86] Preserve !pcsections on atomic intrinsics
Preserve !pcsections metadata on the X86-specific atomic intrinsics emitted
when expanding higher-level atomic instructions.
Differential Revision: https://reviews.llvm.org/D147123
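[Editor's note] For context, IRBuilder::CollectMetadataToCopy records the given
metadata kinds from a source instruction and re-attaches them to every
instruction the builder subsequently creates. A minimal sketch of the pattern
used here (not the actual X86ISelLowering code; the helper name and the IID
parameter are placeholders, and it assumes the chosen intrinsic returns the
same type as the original atomicrmw):

    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/LLVMContext.h"

    using namespace llvm;

    // Placeholder helper: replace an atomicrmw with a target intrinsic call
    // while keeping its !pcsections metadata.
    static void replaceAtomicRMWWithIntrinsic(AtomicRMWInst *AI,
                                              Intrinsic::ID IID) {
      IRBuilder<> Builder(AI);
      // Record !pcsections from AI; the builder copies it onto every
      // instruction it creates from this point on, so the metadata is not
      // dropped when the atomicrmw is replaced.
      Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});

      // The call created here automatically carries AI's !pcsections.
      CallInst *Call = Builder.CreateIntrinsic(
          IID, {AI->getValOperand()->getType()},
          {AI->getPointerOperand(), AI->getValOperand()});

      AI->replaceAllUsesWith(Call);
      AI->eraseFromParent();
    }

In the diff below, the same one-line CollectMetadataToCopy call is added to
emitBitTestAtomicRMWIntrinsic, emitCmpArithAtomicRMWIntrinsic, and
lowerIdempotentRMWIntoFencedLoad.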
Added:
Modified:
llvm/lib/Target/X86/X86ISelLowering.cpp
llvm/test/CodeGen/X86/pcsections-atomics.ll
Removed:
################################################################################
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index b2b816f042393..68af565195c51 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -32127,6 +32127,7 @@ X86TargetLowering::shouldExpandLogicAtomicRMWInIR(AtomicRMWInst *AI) const {
void X86TargetLowering::emitBitTestAtomicRMWIntrinsic(AtomicRMWInst *AI) const {
IRBuilder<> Builder(AI);
+ Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
Intrinsic::ID IID_C = Intrinsic::not_intrinsic;
Intrinsic::ID IID_I = Intrinsic::not_intrinsic;
switch (AI->getOperation()) {
@@ -32267,6 +32268,7 @@ static bool shouldExpandCmpArithRMWInIR(AtomicRMWInst *AI) {
void X86TargetLowering::emitCmpArithAtomicRMWIntrinsic(
AtomicRMWInst *AI) const {
IRBuilder<> Builder(AI);
+ Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
Instruction *TempI = nullptr;
LLVMContext &Ctx = AI->getContext();
ICmpInst *ICI = dyn_cast<ICmpInst>(AI->user_back());
@@ -32392,6 +32394,7 @@ X86TargetLowering::lowerIdempotentRMWIntoFencedLoad(AtomicRMWInst *AI) const {
return nullptr;
IRBuilder<> Builder(AI);
+ Builder.CollectMetadataToCopy(AI, {LLVMContext::MD_pcsections});
Module *M = Builder.GetInsertBlock()->getParent()->getParent();
auto SSID = AI->getSyncScopeID();
// We must restrict the ordering to avoid generating loads with Release or
diff --git a/llvm/test/CodeGen/X86/pcsections-atomics.ll b/llvm/test/CodeGen/X86/pcsections-atomics.ll
index 6881cb373cf71..8567865cbbe10 100644
--- a/llvm/test/CodeGen/X86/pcsections-atomics.ll
+++ b/llvm/test/CodeGen/X86/pcsections-atomics.ll
@@ -9916,4 +9916,67 @@ entry:
ret void
}
+define i64 @atomic_use_cond(ptr %a) {
+; O0-LABEL: atomic_use_cond:
+; O0: # %bb.0: # %entry
+; O0-NEXT: .Lpcsection412:
+; O0-NEXT: lock decq (%rdi)
+; O0-NEXT: .Lpcsection413:
+; O0-NEXT: sete %al
+; O0-NEXT: testb $1, %al
+; O0-NEXT: je .LBB197_2
+; O0-NEXT: # %bb.1: # %then
+; O0-NEXT: movl $1, %eax
+; O0-NEXT: retq
+; O0-NEXT: .LBB197_2: # %else
+; O0-NEXT: movl $2, %eax
+; O0-NEXT: retq
+;
+; O1-LABEL: atomic_use_cond:
+; O1: # %bb.0: # %entry
+; O1-NEXT: .Lpcsection327:
+; O1-NEXT: lock decq (%rdi)
+; O1-NEXT: jne .LBB197_2
+; O1-NEXT: # %bb.1: # %then
+; O1-NEXT: movl $1, %eax
+; O1-NEXT: retq
+; O1-NEXT: .LBB197_2: # %else
+; O1-NEXT: movl $2, %eax
+; O1-NEXT: retq
+;
+; O2-LABEL: atomic_use_cond:
+; O2: # %bb.0: # %entry
+; O2-NEXT: .Lpcsection327:
+; O2-NEXT: lock decq (%rdi)
+; O2-NEXT: jne .LBB197_2
+; O2-NEXT: # %bb.1: # %then
+; O2-NEXT: movl $1, %eax
+; O2-NEXT: retq
+; O2-NEXT: .LBB197_2: # %else
+; O2-NEXT: movl $2, %eax
+; O2-NEXT: retq
+;
+; O3-LABEL: atomic_use_cond:
+; O3: # %bb.0: # %entry
+; O3-NEXT: .Lpcsection327:
+; O3-NEXT: lock decq (%rdi)
+; O3-NEXT: jne .LBB197_2
+; O3-NEXT: # %bb.1: # %then
+; O3-NEXT: movl $1, %eax
+; O3-NEXT: retq
+; O3-NEXT: .LBB197_2: # %else
+; O3-NEXT: movl $2, %eax
+; O3-NEXT: retq
+entry:
+ %x = atomicrmw sub ptr %a, i64 1 seq_cst, align 8, !pcsections !0
+ %y = icmp eq i64 %x, 1
+ br i1 %y, label %then, label %else
+
+then:
+ ret i64 1
+
+else:
+ ret i64 2
+}
+
!0 = !{!"somesection"}