[compiler-rt] [TSan] Fix deadlocks during TSan error reporting on Apple platforms (PR #151495)
Dan Blackwell via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 4 07:11:29 PDT 2025
https://github.com/DanBlackwell updated https://github.com/llvm/llvm-project/pull/151495
From 702879ea115204366155923e903780baf1fcb1d6 Mon Sep 17 00:00:00 2001
From: Dan Blackwell <dan_blackwell at apple.com>
Date: Thu, 31 Jul 2025 10:15:54 +0100
Subject: [PATCH 1/4] [TSan] Fix deadlocks during TSan error reporting on Apple
platforms
* All platforms: move OutputReport (and thus symbolization) to after the locked region when reporting (sketched below).
* The ScopedReportBase constructor requires the thread_registry lock to be held, so ScopedReports must still be constructed inside the locked region.
* Apple only: Bail early from MemoryAccess if we are symbolizing.
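In outline, each reporting path now follows the shape below. This is a
simplified sketch rather than the literal code from any one function;
ReportTypeFoo and the populate step are placeholders:

// Sketch: build the report while the locks are held, emit it afterwards.
ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
{
  ThreadRegistryLock l(&ctx->thread_registry);
  // The ScopedReport constructor requires the registry lock to be held.
  new (rep) ScopedReport(ReportTypeFoo);
  // ... populate *rep: stacks, mutexes, threads, locations ...
}  // Scope closes here, releasing the locks before symbolization.
OutputReport(thr, *rep);  // Symbolizes, so it must run without the locks.
rep->~ScopedReport();  // Placement new means a manual destructor call.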
---
.../lib/tsan/rtl/tsan_interceptors_posix.cpp | 25 +++-
.../lib/tsan/rtl/tsan_interface_ann.cpp | 29 ++--
compiler-rt/lib/tsan/rtl/tsan_mman.cpp | 17 ++-
compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp | 5 +
compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp | 132 +++++++++++-------
compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp | 108 +++++++-------
compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp | 25 +++-
7 files changed, 213 insertions(+), 128 deletions(-)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 795e05394d71a..fa160126485ba 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -22,6 +22,7 @@
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_platform_interceptors.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
@@ -2141,13 +2142,23 @@ static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
// StackTrace::GetNextInstructionPc(pc) is used because a return address is
// expected; OutputReport() will undo this.
ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
- ThreadRegistryLock l(&ctx->thread_registry);
- ScopedReport rep(ReportTypeErrnoInSignal);
- rep.SetSigNum(sig);
- if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
- rep.AddStack(stack, true);
- OutputReport(thr, rep);
- }
+ ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+ {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ new (_rep) ScopedReport(ReportTypeErrnoInSignal);
+ ScopedReport &rep = *_rep;
+ rep.SetSigNum(sig);
+ if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
+ rep.AddStack(stack, true);
+ OutputReport(thr, rep);
+ }
+ } // Close this scope to release the locks
+
+ OutputReport(thr, *_rep);
+ // Need to manually destroy this because we used placement new to allocate
+ _rep->~ScopedReport();
}
static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
index befd6a369026d..fa9899b982d25 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
@@ -437,16 +437,25 @@ void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
}
static void ReportMutexHeldWrongContext(ThreadState *thr, uptr pc) {
- ThreadRegistryLock l(&ctx->thread_registry);
- ScopedReport rep(ReportTypeMutexHeldWrongContext);
- for (uptr i = 0; i < thr->mset.Size(); ++i) {
- MutexSet::Desc desc = thr->mset.Get(i);
- rep.AddMutex(desc.addr, desc.stack_id);
- }
- VarSizeStackTrace trace;
- ObtainCurrentStack(thr, pc, &trace);
- rep.AddStack(trace, true);
- OutputReport(thr, rep);
+ ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+ {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ new (_rep) ScopedReport(ReportTypeMutexHeldWrongContext);
+ ScopedReport &rep = *_rep;
+ for (uptr i = 0; i < thr->mset.Size(); ++i) {
+ MutexSet::Desc desc = thr->mset.Get(i);
+ rep.AddMutex(desc.addr, desc.stack_id);
+ }
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+ } // Close this scope to release the locks
+
+ OutputReport(thr, *_rep);
+ // Need to manually destroy this because we used placement new to allocate
+ _rep->~ScopedReport();
}
INTERFACE_ATTRIBUTE
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index 0ea83fb3b5982..fce1d70de1d6f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -182,10 +182,19 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
ObtainCurrentStack(thr, pc, &stack);
if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
return;
- ThreadRegistryLock l(&ctx->thread_registry);
- ScopedReport rep(ReportTypeSignalUnsafe);
- rep.AddStack(stack, true);
- OutputReport(thr, rep);
+ ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+ {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ new (_rep) ScopedReport(ReportTypeSignalUnsafe);
+ ScopedReport &rep = *_rep;
+ rep.AddStack(stack, true);
+ } // Close this scope to release the locks
+
+ OutputReport(thr, *_rep);
+ // Need to manually destroy this because we used placement new to allocate
+ _rep->~ScopedReport();
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
index 487fa490636eb..02719beac9f07 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
@@ -419,6 +419,11 @@ NOINLINE void TraceRestartMemoryAccess(ThreadState* thr, uptr pc, uptr addr,
ALWAYS_INLINE USED void MemoryAccess(ThreadState* thr, uptr pc, uptr addr,
uptr size, AccessType typ) {
+#if SANITIZER_APPLE && !SANITIZER_GO
+  // Calls made by the Swift symbolizer can be intercepted and deadlock without this
+ if (thr->in_symbolizer)
+ return;
+#endif
RawShadow* shadow_mem = MemToShadow(addr);
UNUSED char memBuf[4][64];
DPrintf2("#%d: Access: %d@%d %p/%zd typ=0x%x {%s, %s, %s, %s}\n", thr->tid,
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
index 2a2bf42c92ecb..4026f7a8e0c14 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -11,14 +11,15 @@
//===----------------------------------------------------------------------===//
#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
+#include <sanitizer_common/sanitizer_placement_new.h>
#include <sanitizer_common/sanitizer_stackdepot.h>
-#include "tsan_rtl.h"
#include "tsan_flags.h"
-#include "tsan_sync.h"
+#include "tsan_platform.h"
#include "tsan_report.h"
+#include "tsan_rtl.h"
#include "tsan_symbolize.h"
-#include "tsan_platform.h"
+#include "tsan_sync.h"
namespace __tsan {
@@ -55,14 +56,23 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
return;
if (!ShouldReport(thr, typ))
return;
- ThreadRegistryLock l(&ctx->thread_registry);
- ScopedReport rep(typ);
- rep.AddMutex(addr, creation_stack_id);
- VarSizeStackTrace trace;
- ObtainCurrentStack(thr, pc, &trace);
- rep.AddStack(trace, true);
- rep.AddLocation(addr, 1);
- OutputReport(thr, rep);
+ ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+ {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ new (_rep) ScopedReport(typ);
+ ScopedReport &rep = *_rep;
+ rep.AddMutex(addr, creation_stack_id);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ } // Close this scope to release the locks
+
+ OutputReport(thr, *_rep);
+ // Need to manually destroy this because we used placement new to allocate
+ _rep->~ScopedReport();
}
static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr,
@@ -528,53 +538,71 @@ void AfterSleep(ThreadState *thr, uptr pc) {
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
return;
- ThreadRegistryLock l(&ctx->thread_registry);
- ScopedReport rep(ReportTypeDeadlock);
- for (int i = 0; i < r->n; i++) {
- rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
- rep.AddUniqueTid((int)r->loop[i].thr_ctx);
- rep.AddThread((int)r->loop[i].thr_ctx);
- }
- uptr dummy_pc = 0x42;
- for (int i = 0; i < r->n; i++) {
- for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
- u32 stk = r->loop[i].stk[j];
- StackTrace stack;
- if (stk && stk != kInvalidStackID) {
- stack = StackDepotGet(stk);
- } else {
- // Sometimes we fail to extract the stack trace (FIXME: investigate),
- // but we should still produce some stack trace in the report.
- stack = StackTrace(&dummy_pc, 1);
+ ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+ {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ new (_rep) ScopedReport(ReportTypeDeadlock);
+ ScopedReport &rep = *_rep;
+ for (int i = 0; i < r->n; i++) {
+ rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
+ rep.AddUniqueTid((int)r->loop[i].thr_ctx);
+ rep.AddThread((int)r->loop[i].thr_ctx);
+ }
+ uptr dummy_pc = 0x42;
+ for (int i = 0; i < r->n; i++) {
+ for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
+ u32 stk = r->loop[i].stk[j];
+ StackTrace stack;
+ if (stk && stk != kInvalidStackID) {
+ stack = StackDepotGet(stk);
+ } else {
+ // Sometimes we fail to extract the stack trace (FIXME: investigate),
+ // but we should still produce some stack trace in the report.
+ stack = StackTrace(&dummy_pc, 1);
+ }
+ rep.AddStack(stack, true);
}
- rep.AddStack(stack, true);
}
- }
- OutputReport(thr, rep);
+ } // Close this scope to release the locks
+
+ OutputReport(thr, *_rep);
+ // Need to manually destroy this because we used placement new to allocate
+ _rep->~ScopedReport();
}
void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
FastState last_lock, StackID creation_stack_id) {
- // We need to lock the slot during RestoreStack because it protects
- // the slot journal.
- Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
- ThreadRegistryLock l0(&ctx->thread_registry);
- Lock slots_lock(&ctx->slot_mtx);
- ScopedReport rep(ReportTypeMutexDestroyLocked);
- rep.AddMutex(addr, creation_stack_id);
- VarSizeStackTrace trace;
- ObtainCurrentStack(thr, pc, &trace);
- rep.AddStack(trace, true);
-
- Tid tid;
- DynamicMutexSet mset;
- uptr tag;
- if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr,
- 0, kAccessWrite, &tid, &trace, mset, &tag))
- return;
- rep.AddStack(trace, true);
- rep.AddLocation(addr, 1);
- OutputReport(thr, rep);
+ ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Take a new scope as Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+ {
+ // We need to lock the slot during RestoreStack because it protects
+ // the slot journal.
+ Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
+ ThreadRegistryLock l0(&ctx->thread_registry);
+ Lock slots_lock(&ctx->slot_mtx);
+ new (_rep) ScopedReport(ReportTypeMutexDestroyLocked);
+ ScopedReport &rep = *_rep;
+ rep.AddMutex(addr, creation_stack_id);
+ VarSizeStackTrace trace;
+ ObtainCurrentStack(thr, pc, &trace);
+ rep.AddStack(trace, true);
+
+ Tid tid;
+ DynamicMutexSet mset;
+ uptr tag;
+ if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(),
+ addr, 0, kAccessWrite, &tid, &trace, mset, &tag))
+ return;
+ rep.AddStack(trace, true);
+ rep.AddLocation(addr, 1);
+ } // Close this scope to release the locks
+
+ OutputReport(thr, *_rep);
+ // Need to manually destroy this because we used placement new to allocate
+ _rep->~ScopedReport();
}
} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index e6f0fda9c72af..c2e9ece3ad09e 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -16,6 +16,7 @@
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_defs.h"
#include "tsan_fd.h"
#include "tsan_flags.h"
#include "tsan_mman.h"
@@ -806,65 +807,76 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
DynamicMutexSet mset1;
MutexSet *mset[kMop] = {&thr->mset, mset1};
- // We need to lock the slot during RestoreStack because it protects
- // the slot journal.
- Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
- ThreadRegistryLock l0(&ctx->thread_registry);
- Lock slots_lock(&ctx->slot_mtx);
- if (SpuriousRace(old))
- return;
- if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
- size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
- StoreShadow(&ctx->last_spurious_race, old.raw());
- return;
- }
+ ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+ // Take a new scope as Swift symbolizer the below locks released before
+ // symbolizing in order to avoid a deadlock
+ {
+ // We need to lock the slot during RestoreStack because it protects
+ // the slot journal.
+ Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
+ ThreadRegistryLock l0(&ctx->thread_registry);
+ Lock slots_lock(&ctx->slot_mtx);
+ if (SpuriousRace(old))
+ return;
+ if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
+ size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
+ StoreShadow(&ctx->last_spurious_race, old.raw());
+ return;
+ }
- if (IsFiredSuppression(ctx, rep_typ, traces[1]))
- return;
+ if (IsFiredSuppression(ctx, rep_typ, traces[1]))
+ return;
- if (HandleRacyStacks(thr, traces))
- return;
+ if (HandleRacyStacks(thr, traces))
+ return;
- // If any of the accesses has a tag, treat this as an "external" race.
- uptr tag = kExternalTagNone;
- for (uptr i = 0; i < kMop; i++) {
- if (tags[i] != kExternalTagNone) {
- rep_typ = ReportTypeExternalRace;
- tag = tags[i];
- break;
+ // If any of the accesses has a tag, treat this as an "external" race.
+ uptr tag = kExternalTagNone;
+ for (uptr i = 0; i < kMop; i++) {
+ if (tags[i] != kExternalTagNone) {
+ rep_typ = ReportTypeExternalRace;
+ tag = tags[i];
+ break;
+ }
}
- }
- ScopedReport rep(rep_typ, tag);
- for (uptr i = 0; i < kMop; i++)
- rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
+ new (_rep) ScopedReport(rep_typ, tag);
+ ScopedReport &rep = *_rep;
+ for (uptr i = 0; i < kMop; i++)
+ rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
- for (uptr i = 0; i < kMop; i++) {
- ThreadContext *tctx = static_cast<ThreadContext *>(
- ctx->thread_registry.GetThreadLocked(tids[i]));
- rep.AddThread(tctx);
- }
+ for (uptr i = 0; i < kMop; i++) {
+ ThreadContext *tctx = static_cast<ThreadContext *>(
+ ctx->thread_registry.GetThreadLocked(tids[i]));
+ rep.AddThread(tctx);
+ }
- rep.AddLocation(addr_min, addr_max - addr_min);
-
- if (flags()->print_full_thread_history) {
- const ReportDesc *rep_desc = rep.GetReport();
- for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
- Tid parent_tid = rep_desc->threads[i]->parent_tid;
- if (parent_tid == kMainTid || parent_tid == kInvalidTid)
- continue;
- ThreadContext *parent_tctx = static_cast<ThreadContext *>(
- ctx->thread_registry.GetThreadLocked(parent_tid));
- rep.AddThread(parent_tctx);
+ rep.AddLocation(addr_min, addr_max - addr_min);
+
+ if (flags()->print_full_thread_history) {
+ const ReportDesc *rep_desc = rep.GetReport();
+ for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
+ Tid parent_tid = rep_desc->threads[i]->parent_tid;
+ if (parent_tid == kMainTid || parent_tid == kInvalidTid)
+ continue;
+ ThreadContext *parent_tctx = static_cast<ThreadContext *>(
+ ctx->thread_registry.GetThreadLocked(parent_tid));
+ rep.AddThread(parent_tctx);
+ }
}
- }
#if !SANITIZER_GO
- if (!((typ0 | typ1) & kAccessFree) &&
- s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
- rep.AddSleep(thr->last_sleep_stack_id);
+ if (!((typ0 | typ1) & kAccessFree) &&
+ s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
+ rep.AddSleep(thr->last_sleep_stack_id);
#endif
- OutputReport(thr, rep);
+
+ } // Close this scope to release the locks
+
+ OutputReport(thr, *_rep);
+
+ // Need to manually destroy this because we used placement new to allocate
+ _rep->~ScopedReport();
}
void PrintCurrentStack(ThreadState *thr, uptr pc) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
index c6a8fd2acb6a8..d346798c7dfdc 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
@@ -88,15 +88,26 @@ void ThreadFinalize(ThreadState *thr) {
#if !SANITIZER_GO
if (!ShouldReport(thr, ReportTypeThreadLeak))
return;
- ThreadRegistryLock l(&ctx->thread_registry);
Vector<ThreadLeak> leaks;
- ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
- &leaks);
+ {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
+ &leaks);
+ }
+
+ ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
for (uptr i = 0; i < leaks.Size(); i++) {
- ScopedReport rep(ReportTypeThreadLeak);
- rep.AddThread(leaks[i].tctx, true);
- rep.SetCount(leaks[i].count);
- OutputReport(thr, rep);
+ {
+ ThreadRegistryLock l(&ctx->thread_registry);
+ new (_rep) ScopedReport(ReportTypeThreadLeak);
+ ScopedReport &rep = *_rep;
+ rep.AddThread(leaks[i].tctx, true);
+ rep.SetCount(leaks[i].count);
+ } // Close this scope to release the locks
+
+ OutputReport(thr, *_rep);
+ // Need to manually destroy this because we used placement new to allocate
+ _rep->~ScopedReport();
}
#endif
}
From 5c02711774fd36ea77726788dcf945e37cdf221f Mon Sep 17 00:00:00 2001
From: Dan Blackwell <dan_blackwell at apple.com>
Date: Mon, 4 Aug 2025 14:36:43 +0100
Subject: [PATCH 2/4] Move ReportErrnoSpoiling OutputReport call outside of
locks on Apple platforms
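Roughly, this hoists the suppression decision out of the locked scope so
the report can be emitted after the lock is dropped. A sketch, using the
corrected condition polarity from the follow-up commit in this series:

bool suppressed;
{
  ThreadRegistryLock l(&ctx->thread_registry);
  new (rep) ScopedReport(ReportTypeErrnoInSignal);
  rep->SetSigNum(sig);
  // Decide about suppression while the lock is still held...
  suppressed = IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack);
  if (!suppressed)
    rep->AddStack(stack, true);
}
// ...but only symbolize and print once the lock has been released.
if (!suppressed)
  OutputReport(thr, *rep);
rep->~ScopedReport();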
---
.../lib/tsan/rtl/tsan_interceptors_posix.cpp | 29 +++++++++++--------
1 file changed, 17 insertions(+), 12 deletions(-)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index fa160126485ba..db8a07e7c6ffa 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -2142,23 +2142,28 @@ static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
// StackTrace::GetNextInstructionPc(pc) is used because a return address is
// expected; OutputReport() will undo this.
ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
- ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+ bool suppressed;
// Take a new scope as Apple platforms require the locks below to be
// released before symbolizing in order to avoid a deadlock
{
ThreadRegistryLock l(&ctx->thread_registry);
- new (_rep) ScopedReport(ReportTypeErrnoInSignal);
- ScopedReport &rep = *_rep;
- rep.SetSigNum(sig);
- if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
- rep.AddStack(stack, true);
- OutputReport(thr, rep);
- }
- } // Close this scope to release the locks
+ new (rep) ScopedReport(ReportTypeErrnoInSignal);
+ rep->SetSigNum(sig);
+ suppressed = IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack);
+ if (suppressed)
+ rep->AddStack(stack, true);
+#if SANITIZER_APPLE
+  } // Close this scope to release the locks before writing the report
+#endif
+ if (suppressed)
+ OutputReport(thr, *rep);
- OutputReport(thr, *_rep);
- // Need to manually destroy this because we used placement new to allocate
- _rep->~ScopedReport();
+ // Need to manually destroy this because we used placement new to allocate
+ rep->~ScopedReport();
+#if !SANITIZER_APPLE
+ }
+#endif
}
static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
From 5292631ebddccc23100d619d71729159253c9717 Mon Sep 17 00:00:00 2001
From: Dan Blackwell <dan_blackwell at apple.com>
Date: Mon, 4 Aug 2025 15:07:43 +0100
Subject: [PATCH 3/4] Guard any OutputReports so that only SANITIZER_APPLE
releases locks before calling
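Each reporting function now closes its lock scope conditionally: only
Apple platforms release the locks before OutputReport, while other
platforms keep the previous behaviour of printing under the locks. In
sketch form (typ stands in for the concrete report type):

{
  ThreadRegistryLock l(&ctx->thread_registry);
  new (rep) ScopedReport(typ);
  // ... populate *rep ...
#if SANITIZER_APPLE
}  // Apple: release the locks first, then symbolize and print.
OutputReport(thr, *rep);
#else
  OutputReport(thr, *rep);  // Elsewhere: print while the locks are still held.
}
#endif
rep->~ScopedReport();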
---
.../lib/tsan/rtl/tsan_interceptors_posix.cpp | 15 +++--
.../lib/tsan/rtl/tsan_interface_ann.cpp | 19 +++---
compiler-rt/lib/tsan/rtl/tsan_mman.cpp | 17 +++--
compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp | 67 +++++++++++--------
compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp | 27 ++++----
compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp | 19 +++---
6 files changed, 93 insertions(+), 71 deletions(-)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index db8a07e7c6ffa..90b75d5b76568 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -2151,19 +2151,20 @@ static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
new (rep) ScopedReport(ReportTypeErrnoInSignal);
rep->SetSigNum(sig);
suppressed = IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack);
- if (suppressed)
+ if (!suppressed)
rep->AddStack(stack, true);
#if SANITIZER_APPLE
} // Close this scope to release the locks before writing the report
-#endif
- if (suppressed)
+ if (!suppressed)
+ OutputReport(thr, *rep);
+#else
+ if (!suppressed)
OutputReport(thr, *rep);
-
- // Need to manually destroy this because we used placement new to allocate
- rep->~ScopedReport();
-#if !SANITIZER_APPLE
}
#endif
+
+ // Need to manually destroy this because we used placement new to allocate
+ rep->~ScopedReport();
}
static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
index fa9899b982d25..bf7d8493c62c9 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
@@ -437,25 +437,28 @@ void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
}
static void ReportMutexHeldWrongContext(ThreadState *thr, uptr pc) {
- ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
// Take a new scope as Apple platforms require the locks below to be
// released before symbolizing in order to avoid a deadlock
{
ThreadRegistryLock l(&ctx->thread_registry);
- new (_rep) ScopedReport(ReportTypeMutexHeldWrongContext);
- ScopedReport &rep = *_rep;
+ new (rep) ScopedReport(ReportTypeMutexHeldWrongContext);
for (uptr i = 0; i < thr->mset.Size(); ++i) {
MutexSet::Desc desc = thr->mset.Get(i);
- rep.AddMutex(desc.addr, desc.stack_id);
+ rep->AddMutex(desc.addr, desc.stack_id);
}
VarSizeStackTrace trace;
ObtainCurrentStack(thr, pc, &trace);
- rep.AddStack(trace, true);
+ rep->AddStack(trace, true);
+#if SANITIZER_APPLE
} // Close this scope to release the locks
-
- OutputReport(thr, *_rep);
+ OutputReport(thr, *rep);
+#else
+ OutputReport(thr, *rep);
+ }
+#endif
// Need to manually destroy this because we used placement new to allocate
- _rep->~ScopedReport();
+ rep->~ScopedReport();
}
INTERFACE_ATTRIBUTE
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index fce1d70de1d6f..0a34a39b450a9 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -182,19 +182,22 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
ObtainCurrentStack(thr, pc, &stack);
if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
return;
- ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
// Take a new scope as Apple platforms require the locks below to be
// released before symbolizing in order to avoid a deadlock
{
ThreadRegistryLock l(&ctx->thread_registry);
- new (_rep) ScopedReport(ReportTypeSignalUnsafe);
- ScopedReport &rep = *_rep;
- rep.AddStack(stack, true);
+ new (rep) ScopedReport(ReportTypeSignalUnsafe);
+ rep->AddStack(stack, true);
+#if SANITIZER_APPLE
} // Close this scope to release the locks
-
- OutputReport(thr, *_rep);
+ OutputReport(thr, *rep);
+#else
+ OutputReport(thr, *rep);
+ }
+#endif
// Need to manually destroy this because we used placement new to allocate
- _rep->~ScopedReport();
+ rep->~ScopedReport();
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
index 4026f7a8e0c14..c4ef6f7ed15c1 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -56,23 +56,26 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
return;
if (!ShouldReport(thr, typ))
return;
- ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
// Take a new scope as Apple platforms require the locks below to be
// released before symbolizing in order to avoid a deadlock
{
ThreadRegistryLock l(&ctx->thread_registry);
- new (_rep) ScopedReport(typ);
- ScopedReport &rep = *_rep;
- rep.AddMutex(addr, creation_stack_id);
+ new (rep) ScopedReport(typ);
+ rep->AddMutex(addr, creation_stack_id);
VarSizeStackTrace trace;
ObtainCurrentStack(thr, pc, &trace);
- rep.AddStack(trace, true);
- rep.AddLocation(addr, 1);
+ rep->AddStack(trace, true);
+ rep->AddLocation(addr, 1);
+#if SANITIZER_APPLE
} // Close this scope to release the locks
-
- OutputReport(thr, *_rep);
+ OutputReport(thr, *rep);
+#else
+ OutputReport(thr, *rep);
+ }
+#endif
// Need to manually destroy this because we used placement new to allocate
- _rep->~ScopedReport();
+ rep->~ScopedReport();
}
static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr,
@@ -538,17 +541,16 @@ void AfterSleep(ThreadState *thr, uptr pc) {
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
return;
- ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
// Take a new scope as Apple platforms require the locks below to be
// released before symbolizing in order to avoid a deadlock
{
ThreadRegistryLock l(&ctx->thread_registry);
- new (_rep) ScopedReport(ReportTypeDeadlock);
- ScopedReport &rep = *_rep;
+ new (rep) ScopedReport(ReportTypeDeadlock);
for (int i = 0; i < r->n; i++) {
- rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
- rep.AddUniqueTid((int)r->loop[i].thr_ctx);
- rep.AddThread((int)r->loop[i].thr_ctx);
+ rep->AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
+ rep->AddUniqueTid((int)r->loop[i].thr_ctx);
+ rep->AddThread((int)r->loop[i].thr_ctx);
}
uptr dummy_pc = 0x42;
for (int i = 0; i < r->n; i++) {
@@ -562,19 +564,23 @@ void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
// but we should still produce some stack trace in the report.
stack = StackTrace(&dummy_pc, 1);
}
- rep.AddStack(stack, true);
+ rep->AddStack(stack, true);
}
}
+#if SANITIZER_APPLE
} // Close this scope to release the locks
-
- OutputReport(thr, *_rep);
+ OutputReport(thr, *rep);
+#else
+ OutputReport(thr, *rep);
+ }
+#endif
// Need to manually destroy this because we used placement new to allocate
- _rep->~ScopedReport();
+ rep->~ScopedReport();
}
void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
FastState last_lock, StackID creation_stack_id) {
- ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
// Take a new scope as Apple platforms require the locks below to be
// released before symbolizing in order to avoid a deadlock
{
@@ -583,12 +589,11 @@ void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
ThreadRegistryLock l0(&ctx->thread_registry);
Lock slots_lock(&ctx->slot_mtx);
- new (_rep) ScopedReport(ReportTypeMutexDestroyLocked);
- ScopedReport &rep = *_rep;
- rep.AddMutex(addr, creation_stack_id);
+ new (rep) ScopedReport(ReportTypeMutexDestroyLocked);
+ rep->AddMutex(addr, creation_stack_id);
VarSizeStackTrace trace;
ObtainCurrentStack(thr, pc, &trace);
- rep.AddStack(trace, true);
+ rep->AddStack(trace, true);
Tid tid;
DynamicMutexSet mset;
@@ -596,13 +601,17 @@ void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(),
addr, 0, kAccessWrite, &tid, &trace, mset, &tag))
return;
- rep.AddStack(trace, true);
- rep.AddLocation(addr, 1);
+ rep->AddStack(trace, true);
+ rep->AddLocation(addr, 1);
+#if SANITIZER_APPLE
} // Close this scope to release the locks
-
- OutputReport(thr, *_rep);
+ OutputReport(thr, *rep);
+#else
+ OutputReport(thr, *rep);
+ }
+#endif
// Need to manually destroy this because we used placement new to allocate
- _rep->~ScopedReport();
+ rep->~ScopedReport();
}
} // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index c2e9ece3ad09e..2e91ef9b0e016 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -807,7 +807,7 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
DynamicMutexSet mset1;
MutexSet *mset[kMop] = {&thr->mset, mset1};
- ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
// Take a new scope as Swift symbolizer the below locks released before
// symbolizing in order to avoid a deadlock
{
@@ -840,43 +840,46 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
}
}
- new (_rep) ScopedReport(rep_typ, tag);
- ScopedReport &rep = *_rep;
+ new (rep) ScopedReport(rep_typ, tag);
for (uptr i = 0; i < kMop; i++)
- rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
+ rep->AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
for (uptr i = 0; i < kMop; i++) {
ThreadContext *tctx = static_cast<ThreadContext *>(
ctx->thread_registry.GetThreadLocked(tids[i]));
- rep.AddThread(tctx);
+ rep->AddThread(tctx);
}
- rep.AddLocation(addr_min, addr_max - addr_min);
+ rep->AddLocation(addr_min, addr_max - addr_min);
if (flags()->print_full_thread_history) {
- const ReportDesc *rep_desc = rep.GetReport();
+ const ReportDesc *rep_desc = rep->GetReport();
for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
Tid parent_tid = rep_desc->threads[i]->parent_tid;
if (parent_tid == kMainTid || parent_tid == kInvalidTid)
continue;
ThreadContext *parent_tctx = static_cast<ThreadContext *>(
ctx->thread_registry.GetThreadLocked(parent_tid));
- rep.AddThread(parent_tctx);
+ rep->AddThread(parent_tctx);
}
}
#if !SANITIZER_GO
if (!((typ0 | typ1) & kAccessFree) &&
s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
- rep.AddSleep(thr->last_sleep_stack_id);
+ rep->AddSleep(thr->last_sleep_stack_id);
#endif
+#if SANITIZER_APPLE
} // Close this scope to release the locks
-
- OutputReport(thr, *_rep);
+ OutputReport(thr, *rep);
+#else
+ OutputReport(thr, *rep);
+ }
+#endif
// Need to manually destroy this because we used placement new to allocate
- _rep->~ScopedReport();
+ rep->~ScopedReport();
}
void PrintCurrentStack(ThreadState *thr, uptr pc) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
index d346798c7dfdc..53dfb6a719cba 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
@@ -95,19 +95,22 @@ void ThreadFinalize(ThreadState *thr) {
&leaks);
}
- ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+ ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
for (uptr i = 0; i < leaks.Size(); i++) {
{
ThreadRegistryLock l(&ctx->thread_registry);
- new (_rep) ScopedReport(ReportTypeThreadLeak);
- ScopedReport &rep = *_rep;
- rep.AddThread(leaks[i].tctx, true);
- rep.SetCount(leaks[i].count);
+ new (rep) ScopedReport(ReportTypeThreadLeak);
+ rep->AddThread(leaks[i].tctx, true);
+ rep->SetCount(leaks[i].count);
+# if SANITIZER_APPLE
} // Close this scope to release the locks
-
- OutputReport(thr, *_rep);
+ OutputReport(thr, *rep);
+# else
+ OutputReport(thr, *rep);
+ }
+# endif
// Need to manually destroy this because we used placement new to allocate
- _rep->~ScopedReport();
+ rep->~ScopedReport();
}
#endif
}
From ddb0daf5ffe5428d13503cf3944cf931743bc4b2 Mon Sep 17 00:00:00 2001
From: Dan Blackwell <dan_blackwell at apple.com>
Date: Mon, 4 Aug 2025 15:11:11 +0100
Subject: [PATCH 4/4] Fix grammar issue in comment
---
compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index 2e91ef9b0e016..1c4ac37587a02 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -808,8 +808,8 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
MutexSet *mset[kMop] = {&thr->mset, mset1};
ScopedReport *rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
- // Take a new scope as Swift symbolizer the below locks released before
- // symbolizing in order to avoid a deadlock
+  // Take a new scope as Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
{
// We need to lock the slot during RestoreStack because it protects
// the slot journal.