[compiler-rt] 3483892 - tsan: lock internal allocator around fork

Dmitry Vyukov via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 24 08:01:33 PST 2021


Author: Dmitry Vyukov
Date: 2021-11-24T17:01:28+01:00
New Revision: 348389263ca545f0dd71aea505c595331d2a07c5

URL: https://github.com/llvm/llvm-project/commit/348389263ca545f0dd71aea505c595331d2a07c5
DIFF: https://github.com/llvm/llvm-project/commit/348389263ca545f0dd71aea505c595331d2a07c5.diff

LOG: tsan: lock internal allocator around fork

There is a small chance that the internal allocator is locked
during fork, in which case the child process is created with the
internal allocator still locked and any attempt to use it will deadlock.
For example, this can happen if a suppressed race is detected in the
parent during fork and then another suppressed race is detected after
the fork.
This becomes much more likely with the new tsan runtime,
as it uses the internal allocator for more things.

Reviewed By: melver

Differential Revision: https://reviews.llvm.org/D114531
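
The change follows the standard pre-fork locking discipline: acquire the
allocator locks before fork() so that no other thread can hold them at the
moment the child is created, then release them in both the parent and the
child. A minimal, self-contained sketch of that discipline with plain
pthreads (hypothetical names; tsan routes this through its own
ForkBefore/ForkParentAfter/ForkChildAfter hooks, shown in the diff below):

  #include <pthread.h>

  static pthread_mutex_t allocator_mu = PTHREAD_MUTEX_INITIALIZER;

  // Held across fork() by the forking thread itself, so the child never
  // inherits it locked by a thread that no longer exists.
  static void BeforeFork()      { pthread_mutex_lock(&allocator_mu); }
  static void AfterForkParent() { pthread_mutex_unlock(&allocator_mu); }
  static void AfterForkChild()  { pthread_mutex_unlock(&allocator_mu); }

  __attribute__((constructor)) static void InstallForkHooks() {
    pthread_atfork(BeforeFork, AfterForkParent, AfterForkChild);
  }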

Added: 
    

Modified: 
    compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
    compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
    compiler-rt/lib/tsan/rtl/tsan_mman.cpp
    compiler-rt/lib/tsan/rtl/tsan_mman.h
    compiler-rt/lib/tsan/rtl/tsan_rtl.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
index 3c04289dd8bf6..af0b0949a88ec 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
@@ -125,6 +125,16 @@ void InternalFree(void *addr, InternalAllocatorCache *cache) {
   RawInternalFree(addr, cache);
 }
 
+void InternalAllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
+  internal_allocator_cache_mu.Lock();
+  internal_allocator()->ForceLock();
+}
+
+void InternalAllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+  internal_allocator()->ForceUnlock();
+  internal_allocator_cache_mu.Unlock();
+}
+
 // LowLevelAllocator
 constexpr uptr kLowLevelAllocatorDefaultAlignment = 8;
 static uptr low_level_alloc_min_alignment = kLowLevelAllocatorDefaultAlignment;

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
index 32849036fd04b..38994736877ac 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_internal.h
@@ -48,6 +48,8 @@ void *InternalReallocArray(void *p, uptr count, uptr size,
 void *InternalCalloc(uptr count, uptr size,
                      InternalAllocatorCache *cache = nullptr);
 void InternalFree(void *p, InternalAllocatorCache *cache = nullptr);
+void InternalAllocatorLock();
+void InternalAllocatorUnlock();
 InternalAllocator *internal_allocator();
 
 } // namespace __sanitizer

diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index ef97ad0bc94ea..8f87fff461f25 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -110,6 +110,16 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
   gp->mtx.Unlock();
 }
 
+void AllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
+  global_proc()->mtx.Lock();
+  InternalAllocatorLock();
+}
+
+void AllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+  InternalAllocatorUnlock();
+  global_proc()->mtx.Unlock();
+}
+
 static constexpr uptr kMaxAllowedMallocSize = 1ull << 40;
 static uptr max_user_defined_malloc_size;
 

diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.h b/compiler-rt/lib/tsan/rtl/tsan_mman.h
index efea5e5abdec7..db8488eabbe28 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.h
@@ -24,6 +24,8 @@ void ReplaceSystemMalloc();
 void AllocatorProcStart(Processor *proc);
 void AllocatorProcFinish(Processor *proc);
 void AllocatorPrintStats();
+void AllocatorLock();
+void AllocatorUnlock();
 
 // For user allocations.
 void *user_alloc_internal(ThreadState *thr, uptr pc, uptr sz,

diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index 8126a503ff6dc..4e45042323e81 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -494,6 +494,7 @@ void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   ctx->thread_registry.Lock();
   ctx->report_mtx.Lock();
   ScopedErrorReportLock::Lock();
+  AllocatorLock();
   // Suppress all reports in the pthread_atfork callbacks.
   // Reports will deadlock on the report_mtx.
   // We could ignore sync operations as well,
@@ -512,6 +513,7 @@ void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
 void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
+  AllocatorUnlock();
   ScopedErrorReportLock::Unlock();
   ctx->report_mtx.Unlock();
   ctx->thread_registry.Unlock();
@@ -521,6 +523,7 @@ void ForkChildAfter(ThreadState *thr, uptr pc,
                     bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
+  AllocatorUnlock();
   ScopedErrorReportLock::Unlock();
   ctx->report_mtx.Unlock();
   ctx->thread_registry.Unlock();
@@ -755,7 +758,7 @@ MutexMeta mutex_meta[] = {
     {MutexInvalid, "Invalid", {}},
     {MutexThreadRegistry, "ThreadRegistry", {}},
     {MutexTypeTrace, "Trace", {MutexLeaf}},
-    {MutexTypeReport, "Report", {MutexTypeSyncVar}},
+    {MutexTypeReport, "Report", {MutexTypeSyncVar, MutexTypeGlobalProc}},
     {MutexTypeSyncVar, "SyncVar", {}},
     {MutexTypeAnnotations, "Annotations", {}},
     {MutexTypeAtExit, "AtExit", {MutexTypeSyncVar}},
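
For context, the hazard being closed is the classic fork-while-locked
deadlock, sketched below with plain pthreads (an illustration, not a tsan
test; all names are made up): if another thread can hold a lock at the
moment of fork(), the child inherits that lock in its locked state and the
owner no longer exists there to release it.

  #include <pthread.h>
  #include <unistd.h>

  // Stands in for the internal allocator lock.
  static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;

  static void *Holder(void *) {
    pthread_mutex_lock(&mu);   // another thread holds the lock...
    usleep(500 * 1000);        // ...while the main thread forks
    pthread_mutex_unlock(&mu);
    return nullptr;
  }

  int main() {
    pthread_t t;
    pthread_create(&t, nullptr, Holder, nullptr);
    usleep(100 * 1000);        // make it likely the lock is held during fork()
    if (fork() == 0) {
      // Child: the mutex was copied in its locked state and its owner does
      // not exist in this process, so this call blocks forever.
      pthread_mutex_lock(&mu);
      _exit(0);
    }
    pthread_join(t, nullptr);
    return 0;
  }

Locking the allocator in ForkBefore and unlocking it in both
ForkParentAfter and ForkChildAfter removes exactly this window. The
MutexTypeGlobalProc entry added to the Report row of mutex_meta appears to
record the new lock order (the global processor mutex is now acquired while
the report mutex is held in ForkBefore) for tsan's internal deadlock
checking.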

