[compiler-rt] [TSan] Fix deadlocks during TSan error reporting on Apple platforms (PR #151495)

Dan Blackwell via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 31 03:59:01 PDT 2025


https://github.com/DanBlackwell created https://github.com/llvm/llvm-project/pull/151495

Changes:

* All platforms: move OutputReport (and thus symbolization) to after the locked region when reporting.
  * The ScopedReportBase constructor requires the thread_registry lock to be held, so each ScopedReport must still be constructed inside the locked region; to achieve this I have introduced a new unnamed scope (a condensed sketch of the pattern follows below).
  * This has been done for every function that calls OutputReport, hence the large diff; it could be split into multiple PRs if preferred.
* Apple only: Bail early from MemoryAccess if we are symbolizing.

This is a follow up to https://github.com/llvm/llvm-project/pull/151120, which delayed symbolization of ScopedReports.
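
For reviewers, here is a condensed sketch of the shape every affected call site now roughly follows (illustrative only: typ stands for the report type used at each site, and the populate step varies per function):

    ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
    {
      ThreadRegistryLock l(&ctx->thread_registry);
      new (_rep) ScopedReport(typ);   // must be constructed under the lock
      ScopedReport &rep = *_rep;
      // ... populate rep: stacks, mutexes, threads, locations ...
    }  // scope ends, releasing the lock(s)
    OutputReport(thr, *_rep);  // symbolizes with the report locks released
    _rep->~ScopedReport();     // manual destroy to match the placement new

Previously OutputReport ran while these locks were still held; on Apple platforms symbolization can re-enter intercepted functions that try to take the same locks, which is the deadlock this PR avoids.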

From 702879ea115204366155923e903780baf1fcb1d6 Mon Sep 17 00:00:00 2001
From: Dan Blackwell <dan_blackwell at apple.com>
Date: Thu, 31 Jul 2025 10:15:54 +0100
Subject: [PATCH] [TSan] Fix deadlocks during TSan error reporting on Apple
 platforms

* All platforms: move OutputReport (and thus symbolization) after the locked region when reporting.
  * ScopedReportBase constructor requires the thread_registry lock to be held, so the regions where ScopedReports get created must be inside the locked region.
* Apple only: Bail early from MemoryAccess if we are symbolizing.
---
 .../lib/tsan/rtl/tsan_interceptors_posix.cpp  |  24 +++-
 .../lib/tsan/rtl/tsan_interface_ann.cpp       |  29 ++--
 compiler-rt/lib/tsan/rtl/tsan_mman.cpp        |  17 ++-
 compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp  |   5 +
 compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp   | 135 ++++++++++++-------
 compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp  | 106 ++++++++-------
 compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp  |  25 +++-
 7 files changed, 213 insertions(+), 128 deletions(-)

diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 795e05394d71a..fa160126485ba 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -22,6 +22,7 @@
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_linux.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_platform_interceptors.h"
 #include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
@@ -2141,13 +2142,22 @@ static void ReportErrnoSpoiling(ThreadState *thr, uptr pc, int sig) {
   // StackTrace::GetNestInstructionPc(pc) is used because return address is
   // expected, OutputReport() will undo this.
   ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(ReportTypeErrnoInSignal);
-  rep.SetSigNum(sig);
-  if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
-    rep.AddStack(stack, true);
-    OutputReport(thr, rep);
-  }
+  if (IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack))
+    return;
+  ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Open a new scope: Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (_rep) ScopedReport(ReportTypeErrnoInSignal);
+    ScopedReport &rep = *_rep;
+    rep.SetSigNum(sig);
+    rep.AddStack(stack, true);
+  }  // Close this scope to release the locks
+
+  OutputReport(thr, *_rep);
+  // Need to manually destroy this because we used placement new to allocate
+  _rep->~ScopedReport();
 }
 
 static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
index befd6a369026d..fa9899b982d25 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interface_ann.cpp
@@ -437,16 +437,25 @@ void __tsan_mutex_post_divert(void *addr, unsigned flagz) {
 }
 
 static void ReportMutexHeldWrongContext(ThreadState *thr, uptr pc) {
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(ReportTypeMutexHeldWrongContext);
-  for (uptr i = 0; i < thr->mset.Size(); ++i) {
-    MutexSet::Desc desc = thr->mset.Get(i);
-    rep.AddMutex(desc.addr, desc.stack_id);
-  }
-  VarSizeStackTrace trace;
-  ObtainCurrentStack(thr, pc, &trace);
-  rep.AddStack(trace, true);
-  OutputReport(thr, rep);
+  ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Open a new scope: Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (_rep) ScopedReport(ReportTypeMutexHeldWrongContext);
+    ScopedReport &rep = *_rep;
+    for (uptr i = 0; i < thr->mset.Size(); ++i) {
+      MutexSet::Desc desc = thr->mset.Get(i);
+      rep.AddMutex(desc.addr, desc.stack_id);
+    }
+    VarSizeStackTrace trace;
+    ObtainCurrentStack(thr, pc, &trace);
+    rep.AddStack(trace, true);
+  }  // Close this scope to release the locks
+
+  OutputReport(thr, *_rep);
+  // Need to manually destroy this because we used placement new to allocate
+  _rep->~ScopedReport();
 }
 
 INTERFACE_ATTRIBUTE
diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index 0ea83fb3b5982..fce1d70de1d6f 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -182,10 +182,19 @@ static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
   ObtainCurrentStack(thr, pc, &stack);
   if (IsFiredSuppression(ctx, ReportTypeSignalUnsafe, stack))
     return;
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(ReportTypeSignalUnsafe);
-  rep.AddStack(stack, true);
-  OutputReport(thr, rep);
+  ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Open a new scope: Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (_rep) ScopedReport(ReportTypeSignalUnsafe);
+    ScopedReport &rep = *_rep;
+    rep.AddStack(stack, true);
+  }  // Close this scope to release the locks
+
+  OutputReport(thr, *_rep);
+  // Need to manually destroy this because we used placement new to allocate
+  _rep->~ScopedReport();
 }
 
 
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
index 487fa490636eb..02719beac9f07 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
@@ -419,6 +419,11 @@ NOINLINE void TraceRestartMemoryAccess(ThreadState* thr, uptr pc, uptr addr,
 
 ALWAYS_INLINE USED void MemoryAccess(ThreadState* thr, uptr pc, uptr addr,
                                      uptr size, AccessType typ) {
+#if SANITIZER_APPLE && !SANITIZER_GO
+  // Swift symbolizer calls can be intercepted and deadlock without this
+  if (thr->in_symbolizer)
+    return;
+#endif
   RawShadow* shadow_mem = MemToShadow(addr);
   UNUSED char memBuf[4][64];
   DPrintf2("#%d: Access: %d@%d %p/%zd typ=0x%x {%s, %s, %s, %s}\n", thr->tid,
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
index 2a2bf42c92ecb..4026f7a8e0c14 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_mutex.cpp
@@ -11,14 +11,15 @@
 //===----------------------------------------------------------------------===//
 
 #include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
+#include <sanitizer_common/sanitizer_placement_new.h>
 #include <sanitizer_common/sanitizer_stackdepot.h>
 
-#include "tsan_rtl.h"
 #include "tsan_flags.h"
-#include "tsan_sync.h"
+#include "tsan_platform.h"
 #include "tsan_report.h"
+#include "tsan_rtl.h"
 #include "tsan_symbolize.h"
-#include "tsan_platform.h"
+#include "tsan_sync.h"
 
 namespace __tsan {
 
@@ -55,14 +56,23 @@ static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
     return;
   if (!ShouldReport(thr, typ))
     return;
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(typ);
-  rep.AddMutex(addr, creation_stack_id);
-  VarSizeStackTrace trace;
-  ObtainCurrentStack(thr, pc, &trace);
-  rep.AddStack(trace, true);
-  rep.AddLocation(addr, 1);
-  OutputReport(thr, rep);
+  ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Open a new scope: Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (_rep) ScopedReport(typ);
+    ScopedReport &rep = *_rep;
+    rep.AddMutex(addr, creation_stack_id);
+    VarSizeStackTrace trace;
+    ObtainCurrentStack(thr, pc, &trace);
+    rep.AddStack(trace, true);
+    rep.AddLocation(addr, 1);
+  }  // Close this scope to release the locks
+
+  OutputReport(thr, *_rep);
+  // Need to manually destroy this because we used placement new to allocate
+  _rep->~ScopedReport();
 }
 
 static void RecordMutexLock(ThreadState *thr, uptr pc, uptr addr,
@@ -528,53 +538,74 @@ void AfterSleep(ThreadState *thr, uptr pc) {
 void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
   if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
     return;
-  ThreadRegistryLock l(&ctx->thread_registry);
-  ScopedReport rep(ReportTypeDeadlock);
-  for (int i = 0; i < r->n; i++) {
-    rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
-    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
-    rep.AddThread((int)r->loop[i].thr_ctx);
-  }
-  uptr dummy_pc = 0x42;
-  for (int i = 0; i < r->n; i++) {
-    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
-      u32 stk = r->loop[i].stk[j];
-      StackTrace stack;
-      if (stk && stk != kInvalidStackID) {
-        stack = StackDepotGet(stk);
-      } else {
-        // Sometimes we fail to extract the stack trace (FIXME: investigate),
-        // but we should still produce some stack trace in the report.
-        stack = StackTrace(&dummy_pc, 1);
+  ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Open a new scope: Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    new (_rep) ScopedReport(ReportTypeDeadlock);
+    ScopedReport &rep = *_rep;
+    for (int i = 0; i < r->n; i++) {
+      rep.AddMutex(r->loop[i].mtx_ctx0, r->loop[i].stk[0]);
+      rep.AddUniqueTid((int)r->loop[i].thr_ctx);
+      rep.AddThread((int)r->loop[i].thr_ctx);
+    }
+    uptr dummy_pc = 0x42;
+    for (int i = 0; i < r->n; i++) {
+      for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
+        u32 stk = r->loop[i].stk[j];
+        StackTrace stack;
+        if (stk && stk != kInvalidStackID) {
+          stack = StackDepotGet(stk);
+        } else {
+          // Sometimes we fail to extract the stack trace (FIXME: investigate),
+          // but we should still produce some stack trace in the report.
+          stack = StackTrace(&dummy_pc, 1);
+        }
+        rep.AddStack(stack, true);
       }
-      rep.AddStack(stack, true);
     }
-  }
-  OutputReport(thr, rep);
+  }  // Close this scope to release the locks
+
+  OutputReport(thr, *_rep);
+  // Need to manually destroy this because we used placement new to allocate
+  _rep->~ScopedReport();
 }
 
 void ReportDestroyLocked(ThreadState *thr, uptr pc, uptr addr,
                          FastState last_lock, StackID creation_stack_id) {
-  // We need to lock the slot during RestoreStack because it protects
-  // the slot journal.
-  Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
-  ThreadRegistryLock l0(&ctx->thread_registry);
-  Lock slots_lock(&ctx->slot_mtx);
-  ScopedReport rep(ReportTypeMutexDestroyLocked);
-  rep.AddMutex(addr, creation_stack_id);
-  VarSizeStackTrace trace;
-  ObtainCurrentStack(thr, pc, &trace);
-  rep.AddStack(trace, true);
-
-  Tid tid;
-  DynamicMutexSet mset;
-  uptr tag;
-  if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(), addr,
-                    0, kAccessWrite, &tid, &trace, mset, &tag))
-    return;
-  rep.AddStack(trace, true);
-  rep.AddLocation(addr, 1);
-  OutputReport(thr, rep);
+  ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Open a new scope: Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+  {
+    // We need to lock the slot during RestoreStack because it protects
+    // the slot journal.
+    Lock slot_lock(&ctx->slots[static_cast<uptr>(last_lock.sid())].mtx);
+    ThreadRegistryLock l0(&ctx->thread_registry);
+    Lock slots_lock(&ctx->slot_mtx);
+    new (_rep) ScopedReport(ReportTypeMutexDestroyLocked);
+    ScopedReport &rep = *_rep;
+    rep.AddMutex(addr, creation_stack_id);
+    VarSizeStackTrace trace;
+    ObtainCurrentStack(thr, pc, &trace);
+    rep.AddStack(trace, true);
+
+    Tid tid;
+    DynamicMutexSet mset;
+    uptr tag;
+    if (!RestoreStack(EventType::kLock, last_lock.sid(), last_lock.epoch(),
+                      addr, 0, kAccessWrite, &tid, &trace, mset, &tag)) {
+      // Match the placement new with a manual destroy before the early return
+      _rep->~ScopedReport();
+      return;
+    }
+    rep.AddStack(trace, true);
+    rep.AddLocation(addr, 1);
+  }  // Close this scope to release the locks
+
+  OutputReport(thr, *_rep);
+  // Need to manually destroy this because we used placement new to allocate
+  _rep->~ScopedReport();
 }
 
 }  // namespace __tsan
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
index e6f0fda9c72af..c2e9ece3ad09e 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp
@@ -16,6 +16,7 @@
 #include "sanitizer_common/sanitizer_placement_new.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
 #include "sanitizer_common/sanitizer_stacktrace.h"
+#include "tsan_defs.h"
 #include "tsan_fd.h"
 #include "tsan_flags.h"
 #include "tsan_mman.h"
@@ -806,65 +807,74 @@ void ReportRace(ThreadState *thr, RawShadow *shadow_mem, Shadow cur, Shadow old,
   DynamicMutexSet mset1;
   MutexSet *mset[kMop] = {&thr->mset, mset1};
 
-  // We need to lock the slot during RestoreStack because it protects
-  // the slot journal.
-  Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
-  ThreadRegistryLock l0(&ctx->thread_registry);
-  Lock slots_lock(&ctx->slot_mtx);
-  if (SpuriousRace(old))
-    return;
-  if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
-                    size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
-    StoreShadow(&ctx->last_spurious_race, old.raw());
-    return;
-  }
+  ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
+  // Open a new scope: Apple platforms require the locks below to be
+  // released before symbolizing in order to avoid a deadlock
+  {
+    // We need to lock the slot during RestoreStack because it protects
+    // the slot journal.
+    Lock slot_lock(&ctx->slots[static_cast<uptr>(s[1].sid())].mtx);
+    ThreadRegistryLock l0(&ctx->thread_registry);
+    Lock slots_lock(&ctx->slot_mtx);
+    if (SpuriousRace(old))
+      return;
+    if (!RestoreStack(EventType::kAccessExt, s[1].sid(), s[1].epoch(), addr1,
+                      size1, typ1, &tids[1], &traces[1], mset[1], &tags[1])) {
+      StoreShadow(&ctx->last_spurious_race, old.raw());
+      return;
+    }
 
-  if (IsFiredSuppression(ctx, rep_typ, traces[1]))
-    return;
+    if (IsFiredSuppression(ctx, rep_typ, traces[1]))
+      return;
 
-  if (HandleRacyStacks(thr, traces))
-    return;
+    if (HandleRacyStacks(thr, traces))
+      return;
 
-  // If any of the accesses has a tag, treat this as an "external" race.
-  uptr tag = kExternalTagNone;
-  for (uptr i = 0; i < kMop; i++) {
-    if (tags[i] != kExternalTagNone) {
-      rep_typ = ReportTypeExternalRace;
-      tag = tags[i];
-      break;
+    // If any of the accesses has a tag, treat this as an "external" race.
+    uptr tag = kExternalTagNone;
+    for (uptr i = 0; i < kMop; i++) {
+      if (tags[i] != kExternalTagNone) {
+        rep_typ = ReportTypeExternalRace;
+        tag = tags[i];
+        break;
+      }
     }
-  }
 
-  ScopedReport rep(rep_typ, tag);
-  for (uptr i = 0; i < kMop; i++)
-    rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
+    new (_rep) ScopedReport(rep_typ, tag);
+    ScopedReport &rep = *_rep;
+    for (uptr i = 0; i < kMop; i++)
+      rep.AddMemoryAccess(addr, tags[i], s[i], tids[i], traces[i], mset[i]);
 
-  for (uptr i = 0; i < kMop; i++) {
-    ThreadContext *tctx = static_cast<ThreadContext *>(
-        ctx->thread_registry.GetThreadLocked(tids[i]));
-    rep.AddThread(tctx);
-  }
+    for (uptr i = 0; i < kMop; i++) {
+      ThreadContext *tctx = static_cast<ThreadContext *>(
+          ctx->thread_registry.GetThreadLocked(tids[i]));
+      rep.AddThread(tctx);
+    }
 
-  rep.AddLocation(addr_min, addr_max - addr_min);
-
-  if (flags()->print_full_thread_history) {
-    const ReportDesc *rep_desc = rep.GetReport();
-    for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
-      Tid parent_tid = rep_desc->threads[i]->parent_tid;
-      if (parent_tid == kMainTid || parent_tid == kInvalidTid)
-        continue;
-      ThreadContext *parent_tctx = static_cast<ThreadContext *>(
-          ctx->thread_registry.GetThreadLocked(parent_tid));
-      rep.AddThread(parent_tctx);
+    rep.AddLocation(addr_min, addr_max - addr_min);
+
+    if (flags()->print_full_thread_history) {
+      const ReportDesc *rep_desc = rep.GetReport();
+      for (uptr i = 0; i < rep_desc->threads.Size(); i++) {
+        Tid parent_tid = rep_desc->threads[i]->parent_tid;
+        if (parent_tid == kMainTid || parent_tid == kInvalidTid)
+          continue;
+        ThreadContext *parent_tctx = static_cast<ThreadContext *>(
+            ctx->thread_registry.GetThreadLocked(parent_tid));
+        rep.AddThread(parent_tctx);
+      }
     }
-  }
 
 #if !SANITIZER_GO
-  if (!((typ0 | typ1) & kAccessFree) &&
-      s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
-    rep.AddSleep(thr->last_sleep_stack_id);
+    if (!((typ0 | typ1) & kAccessFree) &&
+        s[1].epoch() <= thr->last_sleep_clock.Get(s[1].sid()))
+      rep.AddSleep(thr->last_sleep_stack_id);
 #endif
-  OutputReport(thr, rep);
+  }  // Close this scope to release the locks
+
+  OutputReport(thr, *_rep);
+  // Need to manually destroy this because we used placement new to allocate
+  _rep->~ScopedReport();
 }
 
 void PrintCurrentStack(ThreadState *thr, uptr pc) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
index c6a8fd2acb6a8..d346798c7dfdc 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp
@@ -88,15 +88,26 @@ void ThreadFinalize(ThreadState *thr) {
 #if !SANITIZER_GO
   if (!ShouldReport(thr, ReportTypeThreadLeak))
     return;
-  ThreadRegistryLock l(&ctx->thread_registry);
   Vector<ThreadLeak> leaks;
-  ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
-                                                      &leaks);
+  {
+    ThreadRegistryLock l(&ctx->thread_registry);
+    ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
+                                                        &leaks);
+  }
+
+  ScopedReport *_rep = (ScopedReport *)__builtin_alloca(sizeof(ScopedReport));
   for (uptr i = 0; i < leaks.Size(); i++) {
-    ScopedReport rep(ReportTypeThreadLeak);
-    rep.AddThread(leaks[i].tctx, true);
-    rep.SetCount(leaks[i].count);
-    OutputReport(thr, rep);
+    {
+      ThreadRegistryLock l(&ctx->thread_registry);
+      new (_rep) ScopedReport(ReportTypeThreadLeak);
+      ScopedReport &rep = *_rep;
+      rep.AddThread(leaks[i].tctx, true);
+      rep.SetCount(leaks[i].count);
+    }  // Close this scope to release the locks
+
+    OutputReport(thr, *_rep);
+    // Need to manually destroy this because we used placement new to allocate
+    _rep->~ScopedReport();
   }
 #endif
 }


