[llvm] [ObjCARC] Don't sink objc_retain past atomic writes (PR #184113)

Marina Taylor via llvm-commits llvm-commits at lists.llvm.org
Mon Mar 30 05:53:11 PDT 2026


https://github.com/citymarina updated https://github.com/llvm/llvm-project/pull/184113

>From 4dae701b2abee0269720d06f5172493c5711db16 Mon Sep 17 00:00:00 2001
From: Marina Taylor <marina_taylor at apple.com>
Date: Fri, 27 Feb 2026 15:26:23 +0000
Subject: [PATCH 1/2] [ObjCARC] Don't sink objc_retain past releasing atomics

The releasing atomic may cause another thread to objc_release the pointer, so these instructions must be treated as potentially decrementing refcounts.

This patch is a synthesis of several AI models' outputs, as well as work by David Kilzer.

Co-authored-by: David Kilzer <ddkilzer at apple.com>
Assisted-by: claude

rdar://152185192
---
 .../Transforms/ObjCARC/DependencyAnalysis.cpp | 11 ++-
 .../Transforms/ObjCARC/sink-past-atomic.ll    | 97 +++++++++++++++++++
 2 files changed, 107 insertions(+), 1 deletion(-)
 create mode 100644 llvm/test/Transforms/ObjCARC/sink-past-atomic.ll

diff --git a/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp b/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
index b4cc00033e720..9706093102dae 100644
--- a/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
+++ b/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
@@ -24,6 +24,7 @@
 #include "ProvenanceAnalysis.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/IR/CFG.h"
+#include "llvm/Support/AtomicOrdering.h"
 
 using namespace llvm;
 using namespace llvm::objcarc;
@@ -67,7 +68,15 @@ bool llvm::objcarc::CanDecrementRefCount(const Instruction *Inst,
                                          const Value *Ptr,
                                          ProvenanceAnalysis &PA,
                                          ARCInstKind Class) {
-  // First perform a quick check if Class can not touch ref counts.
+  // Atomic RMW and CmpXchg instructions with release or stronger ordering
+  // publish memory to other threads, which may then read the stored pointer and
+  // release it. Treat these as potentially decrementing refcounts.
+  if (const auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
+    return isReleaseOrStronger(RMW->getOrdering());
+  if (const auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(Inst))
+    return isReleaseOrStronger(CmpXchg->getSuccessOrdering());
+
+  // Perform a quick check if Class can not touch ref counts.
   if (!CanDecrementRefCount(Class))
     return false;
 
diff --git a/llvm/test/Transforms/ObjCARC/sink-past-atomic.ll b/llvm/test/Transforms/ObjCARC/sink-past-atomic.ll
new file mode 100644
index 0000000000000..796909cb41c76
--- /dev/null
+++ b/llvm/test/Transforms/ObjCARC/sink-past-atomic.ll
@@ -0,0 +1,97 @@
+; RUN: opt -passes=objc-arc -S < %s | FileCheck %s
+
+target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
+
+declare ptr @llvm.objc.retain(ptr)
+declare void @llvm.objc.release(ptr)
+
+; Retain must not sink past an atomicrmw with release or stronger ordering.
+
+define void @test_atomicrmw_release(ptr %obj, ptr %atomic_slot) {
+; CHECK-LABEL: @test_atomicrmw_release
+; CHECK: call ptr @llvm.objc.retain
+; CHECK: atomicrmw
+entry:
+  %obj_as_int = ptrtoint ptr %obj to i64
+  %retained = call ptr @llvm.objc.retain(ptr %obj)
+  %old_value = atomicrmw xchg ptr %atomic_slot, i64 %obj_as_int release, align 8
+  %old_obj = inttoptr i64 %old_value to ptr
+  call void @llvm.objc.release(ptr %old_obj)
+  call void @llvm.objc.release(ptr %obj)
+  ret void
+}
+
+define void @test_atomicrmw_acqrel(ptr %obj, ptr %atomic_slot) {
+; CHECK-LABEL: @test_atomicrmw_acqrel
+; CHECK: call ptr @llvm.objc.retain
+; CHECK: atomicrmw
+entry:
+  %obj_as_int = ptrtoint ptr %obj to i64
+  %retained = call ptr @llvm.objc.retain(ptr %obj)
+  %old_value = atomicrmw xchg ptr %atomic_slot, i64 %obj_as_int acq_rel, align 8
+  %old_obj = inttoptr i64 %old_value to ptr
+  call void @llvm.objc.release(ptr %old_obj)
+  call void @llvm.objc.release(ptr %obj)
+  ret void
+}
+
+define void @test_atomicrmw_seqcst(ptr %obj, ptr %atomic_slot) {
+; CHECK-LABEL: @test_atomicrmw_seqcst
+; CHECK: call ptr @llvm.objc.retain
+; CHECK: atomicrmw
+entry:
+  %obj_as_int = ptrtoint ptr %obj to i64
+  %retained = call ptr @llvm.objc.retain(ptr %obj)
+  %old_value = atomicrmw xchg ptr %atomic_slot, i64 %obj_as_int seq_cst, align 8
+  %old_obj = inttoptr i64 %old_value to ptr
+  call void @llvm.objc.release(ptr %old_obj)
+  call void @llvm.objc.release(ptr %obj)
+  ret void
+}
+
+; Retain must not sink past a cmpxchg with release or stronger success ordering.
+
+define void @test_cmpxchg_release(ptr %obj, ptr %atomic_slot, i64 %expected) {
+; CHECK-LABEL: @test_cmpxchg_release
+; CHECK: call ptr @llvm.objc.retain
+; CHECK: cmpxchg
+entry:
+  %obj_as_int = ptrtoint ptr %obj to i64
+  %retained = call ptr @llvm.objc.retain(ptr %obj)
+  %res = cmpxchg ptr %atomic_slot, i64 %expected, i64 %obj_as_int release seq_cst, align 8
+  %old_int = extractvalue { i64, i1 } %res, 0
+  %old_obj = inttoptr i64 %old_int to ptr
+  call void @llvm.objc.release(ptr %old_obj)
+  call void @llvm.objc.release(ptr %obj)
+  ret void
+}
+
+define void @test_cmpxchg_acqrel(ptr %obj, ptr %atomic_slot, i64 %expected) {
+; CHECK-LABEL: @test_cmpxchg_acqrel
+; CHECK: call ptr @llvm.objc.retain
+; CHECK: cmpxchg
+entry:
+  %obj_as_int = ptrtoint ptr %obj to i64
+  %retained = call ptr @llvm.objc.retain(ptr %obj)
+  %res = cmpxchg ptr %atomic_slot, i64 %expected, i64 %obj_as_int acq_rel seq_cst, align 8
+  %old_int = extractvalue { i64, i1 } %res, 0
+  %old_obj = inttoptr i64 %old_int to ptr
+  call void @llvm.objc.release(ptr %old_obj)
+  call void @llvm.objc.release(ptr %obj)
+  ret void
+}
+
+define void @test_cmpxchg_seqcst(ptr %obj, ptr %atomic_slot, i64 %expected) {
+; CHECK-LABEL: @test_cmpxchg_seqcst
+; CHECK: call ptr @llvm.objc.retain
+; CHECK: cmpxchg
+entry:
+  %obj_as_int = ptrtoint ptr %obj to i64
+  %retained = call ptr @llvm.objc.retain(ptr %obj)
+  %res = cmpxchg ptr %atomic_slot, i64 %expected, i64 %obj_as_int seq_cst seq_cst, align 8
+  %old_int = extractvalue { i64, i1 } %res, 0
+  %old_obj = inttoptr i64 %old_int to ptr
+  call void @llvm.objc.release(ptr %old_obj)
+  call void @llvm.objc.release(ptr %obj)
+  ret void
+}

>From 8f8ab009a4436ac04f6e9ca862ed405698fad003 Mon Sep 17 00:00:00 2001
From: Marina Taylor <marina_taylor at apple.com>
Date: Tue, 3 Mar 2026 17:20:25 +0000
Subject: [PATCH 2/2] Check for all atomic stores

---
 .../Transforms/ObjCARC/DependencyAnalysis.cpp | 15 ++--
 .../Transforms/ObjCARC/sink-past-atomic.ll    | 74 ++++---------------
 2 files changed, 20 insertions(+), 69 deletions(-)

diff --git a/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp b/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
index 9706093102dae..ae9f9ad0d3bac 100644
--- a/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
+++ b/llvm/lib/Transforms/ObjCARC/DependencyAnalysis.cpp
@@ -24,7 +24,6 @@
 #include "ProvenanceAnalysis.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/IR/CFG.h"
-#include "llvm/Support/AtomicOrdering.h"
 
 using namespace llvm;
 using namespace llvm::objcarc;
@@ -68,13 +67,13 @@ bool llvm::objcarc::CanDecrementRefCount(const Instruction *Inst,
                                          const Value *Ptr,
                                          ProvenanceAnalysis &PA,
                                          ARCInstKind Class) {
-  // Atomic RMW and CmpXchg instructions with release or stronger ordering
-  // publish memory to other threads, which may then read the stored pointer and
-  // release it. Treat these as potentially decrementing refcounts.
-  if (const auto *RMW = dyn_cast<AtomicRMWInst>(Inst))
-    return isReleaseOrStronger(RMW->getOrdering());
-  if (const auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(Inst))
-    return isReleaseOrStronger(CmpXchg->getSuccessOrdering());
+  // Atomic stores, RMW, and CmpXchg may make a pointer visible to another
+  // thread, which could release it. Treat such instructions as potentially
+  // decrementing refcounts.
+  if (const auto *SI = dyn_cast<StoreInst>(Inst); SI && SI->isAtomic())
+    return true;
+  if (isa<AtomicRMWInst>(Inst) || isa<AtomicCmpXchgInst>(Inst))
+    return true;
 
   // Perform a quick check if Class can not touch ref counts.
   if (!CanDecrementRefCount(Class))
diff --git a/llvm/test/Transforms/ObjCARC/sink-past-atomic.ll b/llvm/test/Transforms/ObjCARC/sink-past-atomic.ll
index 796909cb41c76..6cf285c801ed5 100644
--- a/llvm/test/Transforms/ObjCARC/sink-past-atomic.ll
+++ b/llvm/test/Transforms/ObjCARC/sink-past-atomic.ll
@@ -5,75 +5,30 @@ target datalayout = "e-m:o-i64:64-i128:128-n32:64-S128"
 declare ptr @llvm.objc.retain(ptr)
 declare void @llvm.objc.release(ptr)
 
-; Retain must not sink past an atomicrmw with release or stronger ordering.
-
-define void @test_atomicrmw_release(ptr %obj, ptr %atomic_slot) {
-; CHECK-LABEL: @test_atomicrmw_release
+; Retain must not sink past atomicrmw.
+define void @test_atomicrmw(ptr %obj, ptr %atomic_slot) {
+; CHECK-LABEL: @test_atomicrmw
 ; CHECK: call ptr @llvm.objc.retain
 ; CHECK: atomicrmw
 entry:
   %obj_as_int = ptrtoint ptr %obj to i64
   %retained = call ptr @llvm.objc.retain(ptr %obj)
-  %old_value = atomicrmw xchg ptr %atomic_slot, i64 %obj_as_int release, align 8
+  %old_value = atomicrmw xchg ptr %atomic_slot, i64 %obj_as_int monotonic, align 8
   %old_obj = inttoptr i64 %old_value to ptr
   call void @llvm.objc.release(ptr %old_obj)
   call void @llvm.objc.release(ptr %obj)
   ret void
 }
 
-define void @test_atomicrmw_acqrel(ptr %obj, ptr %atomic_slot) {
-; CHECK-LABEL: @test_atomicrmw_acqrel
-; CHECK: call ptr @llvm.objc.retain
-; CHECK: atomicrmw
-entry:
-  %obj_as_int = ptrtoint ptr %obj to i64
-  %retained = call ptr @llvm.objc.retain(ptr %obj)
-  %old_value = atomicrmw xchg ptr %atomic_slot, i64 %obj_as_int acq_rel, align 8
-  %old_obj = inttoptr i64 %old_value to ptr
-  call void @llvm.objc.release(ptr %old_obj)
-  call void @llvm.objc.release(ptr %obj)
-  ret void
-}
-
-define void @test_atomicrmw_seqcst(ptr %obj, ptr %atomic_slot) {
-; CHECK-LABEL: @test_atomicrmw_seqcst
-; CHECK: call ptr @llvm.objc.retain
-; CHECK: atomicrmw
-entry:
-  %obj_as_int = ptrtoint ptr %obj to i64
-  %retained = call ptr @llvm.objc.retain(ptr %obj)
-  %old_value = atomicrmw xchg ptr %atomic_slot, i64 %obj_as_int seq_cst, align 8
-  %old_obj = inttoptr i64 %old_value to ptr
-  call void @llvm.objc.release(ptr %old_obj)
-  call void @llvm.objc.release(ptr %obj)
-  ret void
-}
-
-; Retain must not sink past a cmpxchg with release or stronger success ordering.
-
-define void @test_cmpxchg_release(ptr %obj, ptr %atomic_slot, i64 %expected) {
-; CHECK-LABEL: @test_cmpxchg_release
-; CHECK: call ptr @llvm.objc.retain
-; CHECK: cmpxchg
-entry:
-  %obj_as_int = ptrtoint ptr %obj to i64
-  %retained = call ptr @llvm.objc.retain(ptr %obj)
-  %res = cmpxchg ptr %atomic_slot, i64 %expected, i64 %obj_as_int release seq_cst, align 8
-  %old_int = extractvalue { i64, i1 } %res, 0
-  %old_obj = inttoptr i64 %old_int to ptr
-  call void @llvm.objc.release(ptr %old_obj)
-  call void @llvm.objc.release(ptr %obj)
-  ret void
-}
-
-define void @test_cmpxchg_acqrel(ptr %obj, ptr %atomic_slot, i64 %expected) {
-; CHECK-LABEL: @test_cmpxchg_acqrel
+; Retain must not sink past cmpxchg.
+define void @test_cmpxchg(ptr %obj, ptr %atomic_slot, i64 %expected) {
+; CHECK-LABEL: @test_cmpxchg
 ; CHECK: call ptr @llvm.objc.retain
 ; CHECK: cmpxchg
 entry:
   %obj_as_int = ptrtoint ptr %obj to i64
   %retained = call ptr @llvm.objc.retain(ptr %obj)
-  %res = cmpxchg ptr %atomic_slot, i64 %expected, i64 %obj_as_int acq_rel seq_cst, align 8
+  %res = cmpxchg ptr %atomic_slot, i64 %expected, i64 %obj_as_int monotonic monotonic, align 8
   %old_int = extractvalue { i64, i1 } %res, 0
   %old_obj = inttoptr i64 %old_int to ptr
   call void @llvm.objc.release(ptr %old_obj)
@@ -81,17 +36,14 @@ entry:
   ret void
 }
 
-define void @test_cmpxchg_seqcst(ptr %obj, ptr %atomic_slot, i64 %expected) {
-; CHECK-LABEL: @test_cmpxchg_seqcst
+; Retain must not sink past an atomic store.
+define void @test_atomic_store(ptr %obj, ptr %slot) {
+; CHECK-LABEL: @test_atomic_store
 ; CHECK: call ptr @llvm.objc.retain
-; CHECK: cmpxchg
+; CHECK: store atomic
 entry:
-  %obj_as_int = ptrtoint ptr %obj to i64
   %retained = call ptr @llvm.objc.retain(ptr %obj)
-  %res = cmpxchg ptr %atomic_slot, i64 %expected, i64 %obj_as_int seq_cst seq_cst, align 8
-  %old_int = extractvalue { i64, i1 } %res, 0
-  %old_obj = inttoptr i64 %old_int to ptr
-  call void @llvm.objc.release(ptr %old_obj)
+  store atomic ptr %obj, ptr %slot monotonic, align 8
   call void @llvm.objc.release(ptr %obj)
   ret void
 }



More information about the llvm-commits mailing list