[compiler-rt] 30973f6 - [GWP-ASan] Add locking around unwinder for atfork protection.

Mitch Phillips via llvm-commits llvm-commits at lists.llvm.org
Tue Feb 2 14:45:05 PST 2021


Author: Mitch Phillips
Date: 2021-02-02T14:44:35-08:00
New Revision: 30973f6fe01cc0a9624147466f0c54b91a1b61d7

URL: https://github.com/llvm/llvm-project/commit/30973f6fe01cc0a9624147466f0c54b91a1b61d7
DIFF: https://github.com/llvm/llvm-project/commit/30973f6fe01cc0a9624147466f0c54b91a1b61d7.diff

LOG: [GWP-ASan] Add locking around unwinder for atfork protection.

Unwinders (like libc's backtrace()) can acquire their own locks (like the
libdl lock). Before forking, we need to let any in-flight unwinder release
those locks. Wrap a new lock around the unwinder for atfork protection.
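
To illustrate the hazard and the fix outside of GWP-ASan: if a thread is
inside backtrace() holding the libdl lock when another thread calls fork(),
the child inherits a locked libdl lock with no owning thread and deadlocks
on its next unwind. The sketch below (illustrative names like UnwindMutex
and collectBacktrace, not from this patch) serializes unwinds with a mutex
that a pthread_atfork prepare handler also takes, so fork() waits for any
in-flight unwind to finish:

  #include <execinfo.h>
  #include <pthread.h>

  static pthread_mutex_t UnwindMutex = PTHREAD_MUTEX_INITIALIZER;

  static void collectBacktrace() {
    void *Frames[64];
    // backtrace() may take the libdl lock internally; holding UnwindMutex
    // across the call lets the prepare handler below wait us out.
    pthread_mutex_lock(&UnwindMutex);
    backtrace(Frames, 64);
    pthread_mutex_unlock(&UnwindMutex);
  }

  static void atforkPrepare() { pthread_mutex_lock(&UnwindMutex); }
  static void atforkRelease() { pthread_mutex_unlock(&UnwindMutex); }

  static void installAtForkHandlers() {
    // prepare runs before fork(); the parent and child handlers release
    // the lock on both sides of the fork.
    pthread_atfork(atforkPrepare, /*parent=*/atforkRelease,
                   /*child=*/atforkRelease);
  }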

Reviewed By: eugenis

Differential Revision: https://reviews.llvm.org/D95889

Added: 
    

Modified: 
    compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
    compiler-rt/lib/gwp_asan/guarded_pool_allocator.h

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
index 86304d98dc5d..5e3455e54ef9 100644
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
@@ -103,9 +103,15 @@ void GuardedPoolAllocator::init(const options::Options &Opts) {
     installAtFork();
 }
 
-void GuardedPoolAllocator::disable() { PoolMutex.lock(); }
+void GuardedPoolAllocator::disable() {
+  PoolMutex.lock();
+  BacktraceMutex.lock();
+}
 
-void GuardedPoolAllocator::enable() { PoolMutex.unlock(); }
+void GuardedPoolAllocator::enable() {
+  PoolMutex.unlock();
+  BacktraceMutex.unlock();
+}
 
 void GuardedPoolAllocator::iterate(void *Base, size_t Size, iterate_callback Cb,
                                    void *Arg) {
@@ -232,7 +238,10 @@ void *GuardedPoolAllocator::allocate(size_t Size, size_t Alignment) {
       roundUpTo(Size, PageSize));
 
   Meta->RecordAllocation(UserPtr, Size);
-  Meta->AllocationTrace.RecordBacktrace(Backtrace);
+  {
+    ScopedLock UL(BacktraceMutex);
+    Meta->AllocationTrace.RecordBacktrace(Backtrace);
+  }
 
   return reinterpret_cast<void *>(UserPtr);
 }
@@ -281,6 +290,7 @@ void GuardedPoolAllocator::deallocate(void *Ptr) {
     // otherwise non-reentrant unwinders may deadlock.
     if (!getThreadLocals()->RecursiveGuard) {
       ScopedRecursiveGuard SRG;
+      ScopedLock UL(BacktraceMutex);
       Meta->DeallocationTrace.RecordBacktrace(Backtrace);
     }
   }

diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
index 86521f90fef3..26a459925a4c 100644
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
@@ -196,6 +196,10 @@ class GuardedPoolAllocator {
 
   // A mutex to protect the guarded slot and metadata pool for this class.
   Mutex PoolMutex;
+  // Some unwinders can grab the libdl lock. In order to provide atfork
+  // protection, we need to ensure that we allow an unwinding thread to release
+  // the libdl lock before forking.
+  Mutex BacktraceMutex;
 // Record the number of allocations that we've sampled. We store this amount so
   // that we don't randomly choose to recycle a slot that previously had an
   // allocation before all the slots have been utilised.
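
For context: disable() and enable() are what installAtFork() (called from
init() in the context above, in GWP-ASan's POSIX platform code that this
diff doesn't touch) is assumed to register as fork handlers, so the new
BacktraceMutex is held across fork() alongside PoolMutex. A self-contained
sketch of that shape, with PoolSketch standing in for the real
GuardedPoolAllocator:

  #include <pthread.h>
  #include <mutex>

  // Stand-in for GuardedPoolAllocator, just so the sketch compiles.
  struct PoolSketch {
    std::mutex PoolMutex;
    std::mutex BacktraceMutex;
    void disable() {
      // Taken by the fork-prepare handler: once both locks are held, no
      // thread is mid-allocation or mid-unwind.
      PoolMutex.lock();
      BacktraceMutex.lock();
    }
    void enable() {
      // Released in both the parent and the child after fork().
      PoolMutex.unlock();
      BacktraceMutex.unlock();
    }
    static PoolSketch *get() {
      static PoolSketch Instance;
      return &Instance;
    }
  };

  void installAtForkSketch() {
    pthread_atfork(/*prepare=*/[] { PoolSketch::get()->disable(); },
                   /*parent=*/[] { PoolSketch::get()->enable(); },
                   /*child=*/[] { PoolSketch::get()->enable(); });
  }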

