[compiler-rt] 0da172b - sanitizer_common: add thread safety annotations

Dmitry Vyukov via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 12 02:46:55 PDT 2021


Author: Dmitry Vyukov
Date: 2021-07-12T11:46:49+02:00
New Revision: 0da172b1766e1559ef677aa836dce4f1995dfef3

URL: https://github.com/llvm/llvm-project/commit/0da172b1766e1559ef677aa836dce4f1995dfef3
DIFF: https://github.com/llvm/llvm-project/commit/0da172b1766e1559ef677aa836dce4f1995dfef3.diff

LOG: sanitizer_common: add thread safety annotations

Enable clang Thread Safety Analysis for sanitizers:
https://clang.llvm.org/docs/ThreadSafetyAnalysis.html

Thread Safety Analysis can detect inconsistent locking,
deadlocks, and data races. Without GUARDED_BY annotations
it has limited value, but this change does the heavy lifting
to enable the analysis and allows GUARDED_BY annotations
to be added incrementally.
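
As an illustration (not part of this commit), here is a minimal
sketch of the kind of incremental GUARDED_BY annotation this
enables; the Counters class and all its members are hypothetical:

    #include "sanitizer_common/sanitizer_mutex.h"

    namespace __sanitizer {

    class Counters {
     public:
      void Inc() {
        SpinMutexLock l(&mtx_);  // RAII guard: ACQUIRE in ctor, RELEASE in dtor
        count_++;                // OK: the analysis knows mtx_ is held here
      }
      // REQUIRES documents and enforces that callers already hold mtx_.
      uptr Get() REQUIRES(mtx_) { return count_; }

     private:
      SpinMutex mtx_;
      uptr count_ GUARDED_BY(mtx_) = 0;  // unlocked access is now a build error
    };

    }  // namespace __sanitizer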

Reviewed By: melver

Differential Revision: https://reviews.llvm.org/D105716

Added: 
    compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h

Modified: 
    compiler-rt/CMakeLists.txt
    compiler-rt/lib/asan/asan_allocator.cpp
    compiler-rt/lib/cfi/cfi.cpp
    compiler-rt/lib/lsan/lsan_common.h
    compiler-rt/lib/memprof/memprof_allocator.cpp
    compiler-rt/lib/sanitizer_common/CMakeLists.txt
    compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
    compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
    compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
    compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
    compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
    compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
    compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
    compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
    compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
    compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
    compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
    compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
    compiler-rt/lib/scudo/scudo_allocator.cpp
    compiler-rt/lib/scudo/scudo_tsd.h
    compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
    compiler-rt/lib/tsan/rtl/tsan_rtl.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/CMakeLists.txt b/compiler-rt/CMakeLists.txt
index cdb33087ab53b..782d34c610b8c 100644
--- a/compiler-rt/CMakeLists.txt
+++ b/compiler-rt/CMakeLists.txt
@@ -359,6 +359,14 @@ endif()
 
 append_list_if(COMPILER_RT_DEBUG -DSANITIZER_DEBUG=1 SANITIZER_COMMON_CFLAGS)
 
+if(CMAKE_CXX_COMPILER_ID MATCHES Clang)
+  list(APPEND SANITIZER_COMMON_CFLAGS
+      "-Werror=thread-safety"
+      "-Werror=thread-safety-reference"
+      "-Werror=thread-safety-beta"
+)
+endif()
+
 # If we're using MSVC,
 # always respect the optimization flags set by CMAKE_BUILD_TYPE instead.
 if (NOT MSVC)

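A note on these flags (editorial gloss, not from the commit): they
are gated on Clang because the annotations are Clang attributes, and
-Werror= turns the three thread-safety warning groups into hard
errors. A hypothetical violation that would now fail the build:

    #include "sanitizer_common/sanitizer_mutex.h"

    using namespace __sanitizer;

    SpinMutex mu;
    int data GUARDED_BY(mu);

    void BadWrite() {
      // error: writing variable 'data' requires holding mutex 'mu'
      // exclusively [-Werror,-Wthread-safety-analysis]
      data = 1;
    }

    void GoodWrite() {
      SpinMutexLock l(&mu);
      data = 1;  // OK: mu is held for the scope of 'l'
    }
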
diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
index 7c8bb504332d6..414fba3b427d4 100644
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -852,12 +852,12 @@ struct Allocator {
     quarantine.PrintStats();
   }
 
-  void ForceLock() {
+  void ForceLock() ACQUIRE(fallback_mutex) {
     allocator.ForceLock();
     fallback_mutex.Lock();
   }
 
-  void ForceUnlock() {
+  void ForceUnlock() RELEASE(fallback_mutex) {
     fallback_mutex.Unlock();
     allocator.ForceUnlock();
   }
@@ -1081,11 +1081,9 @@ uptr asan_mz_size(const void *ptr) {
   return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
 }
 
-void asan_mz_force_lock() {
-  instance.ForceLock();
-}
+void asan_mz_force_lock() NO_THREAD_SAFETY_ANALYSIS { instance.ForceLock(); }
 
-void asan_mz_force_unlock() {
+void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS {
   instance.ForceUnlock();
 }
 

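The asan change above illustrates (restated here as a hypothetical
sketch, not the commit's code) the two idioms this commit leans on:
ACQUIRE/RELEASE for functions that intentionally return with a lock
held or release a lock they did not take, and
NO_THREAD_SAFETY_ANALYSIS for wrappers whose pairing the analysis
cannot follow:

    #include "sanitizer_common/sanitizer_mutex.h"

    namespace __sanitizer {

    class Guard {
     public:
      // Lock and unlock deliberately split across calls: each side
      // carries the matching annotation instead of a balanced pair.
      void ForceLock() ACQUIRE(mu_) { mu_.Lock(); }
      void ForceUnlock() RELEASE(mu_) { mu_.Unlock(); }

     private:
      SpinMutex mu_;
    };

    Guard g;

    // C-style wrappers (as in the Darwin malloc zone API) return with
    // the lock still held, which the analysis would flag, so they opt out.
    void force_lock() NO_THREAD_SAFETY_ANALYSIS { g.ForceLock(); }
    void force_unlock() NO_THREAD_SAFETY_ANALYSIS { g.ForceUnlock(); }

    }  // namespace __sanitizer
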
diff --git a/compiler-rt/lib/cfi/cfi.cpp b/compiler-rt/lib/cfi/cfi.cpp
index b75c72b215c27..f691cfb94cfc9 100644
--- a/compiler-rt/lib/cfi/cfi.cpp
+++ b/compiler-rt/lib/cfi/cfi.cpp
@@ -322,14 +322,14 @@ void InitShadow() {
 THREADLOCAL int in_loader;
 BlockingMutex shadow_update_lock(LINKER_INITIALIZED);
 
-void EnterLoader() {
+void EnterLoader() NO_THREAD_SAFETY_ANALYSIS {
   if (in_loader == 0) {
     shadow_update_lock.Lock();
   }
   ++in_loader;
 }
 
-void ExitLoader() {
+void ExitLoader() NO_THREAD_SAFETY_ANALYSIS {
   CHECK(in_loader > 0);
   --in_loader;
   UpdateShadow();

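EnterLoader/ExitLoader take the lock only on the outermost of
possibly re-entrant calls, a depth-dependent pattern the analysis
cannot model, hence the blanket opt-out. A reduced hypothetical form:

    #include "sanitizer_common/sanitizer_mutex.h"

    using namespace __sanitizer;

    THREADLOCAL int depth;
    BlockingMutex lock(LINKER_INITIALIZED);

    // The mutex is taken only when depth goes 0 -> 1 and released when
    // it returns to 0; that state is invisible to the static analysis.
    void Enter() NO_THREAD_SAFETY_ANALYSIS {
      if (depth == 0)
        lock.Lock();
      ++depth;
    }

    void Exit() NO_THREAD_SAFETY_ANALYSIS {
      if (--depth == 0)
        lock.Unlock();
    }
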
diff --git a/compiler-rt/lib/lsan/lsan_common.h b/compiler-rt/lib/lsan/lsan_common.h
index fe855cf375458..776ca60b1e975 100644
--- a/compiler-rt/lib/lsan/lsan_common.h
+++ b/compiler-rt/lib/lsan/lsan_common.h
@@ -221,8 +221,8 @@ void UnlockAllocator();
 // Returns true if [addr, addr + sizeof(void *)) is poisoned.
 bool WordIsPoisoned(uptr addr);
 // Wrappers for ThreadRegistry access.
-void LockThreadRegistry();
-void UnlockThreadRegistry();
+void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
+void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
 ThreadRegistry *GetThreadRegistryLocked();
 bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                            uptr *tls_begin, uptr *tls_end, uptr *cache_begin,

diff --git a/compiler-rt/lib/memprof/memprof_allocator.cpp b/compiler-rt/lib/memprof/memprof_allocator.cpp
index 259c7c144ab79..6f01d4dfcb844 100644
--- a/compiler-rt/lib/memprof/memprof_allocator.cpp
+++ b/compiler-rt/lib/memprof/memprof_allocator.cpp
@@ -736,12 +736,12 @@ struct Allocator {
 
   void PrintStats() { allocator.PrintStats(); }
 
-  void ForceLock() {
+  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
     allocator.ForceLock();
     fallback_mutex.Lock();
   }
 
-  void ForceUnlock() {
+  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
     fallback_mutex.Unlock();
     allocator.ForceUnlock();
   }

diff --git a/compiler-rt/lib/sanitizer_common/CMakeLists.txt b/compiler-rt/lib/sanitizer_common/CMakeLists.txt
index 60ddb80fb5fae..66246006a7a45 100644
--- a/compiler-rt/lib/sanitizer_common/CMakeLists.txt
+++ b/compiler-rt/lib/sanitizer_common/CMakeLists.txt
@@ -185,6 +185,7 @@ set(SANITIZER_IMPL_HEADERS
   sanitizer_syscall_linux_riscv64.inc
   sanitizer_syscalls_netbsd.inc
   sanitizer_thread_registry.h
+  sanitizer_thread_safety.h
   sanitizer_tls_get_addr.h
   sanitizer_vector.h
   sanitizer_win.h

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h b/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
index a033e788cbf86..15f81a04350f2 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
@@ -162,8 +162,8 @@ AddrHashMap<T, kSize>::AddrHashMap() {
   table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
 }
 
-template<typename T, uptr kSize>
-void AddrHashMap<T, kSize>::acquire(Handle *h) {
+template <typename T, uptr kSize>
+void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
   uptr addr = h->addr_;
   uptr hash = calcHash(addr);
   Bucket *b = &table_[hash];
@@ -289,57 +289,57 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) {
   CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
   h->addidx_ = i;
   h->cell_ = c;
-}
-
-template<typename T, uptr kSize>
-void AddrHashMap<T, kSize>::release(Handle *h) {
-  if (!h->cell_)
-    return;
-  Bucket *b = h->bucket_;
-  Cell *c = h->cell_;
-  uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
-  if (h->created_) {
-    // Denote completion of insertion.
-    CHECK_EQ(addr1, 0);
-    // After the following store, the element becomes available
-    // for lock-free reads.
-    atomic_store(&c->addr, h->addr_, memory_order_release);
-    b->mtx.Unlock();
-  } else if (h->remove_) {
-    // Denote that the cell is empty now.
-    CHECK_EQ(addr1, h->addr_);
-    atomic_store(&c->addr, 0, memory_order_release);
-    // See if we need to compact the bucket.
-    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
-    if (h->addidx_ == -1U) {
-      // Removed from embed array, move an add element into the freed cell.
-      if (add && add->size != 0) {
-        uptr last = --add->size;
-        Cell *c1 = &add->cells[last];
-        c->val = c1->val;
-        uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
-        atomic_store(&c->addr, addr1, memory_order_release);
-        atomic_store(&c1->addr, 0, memory_order_release);
-      }
-    } else {
-      // Removed from add array, compact it.
-      uptr last = --add->size;
-      Cell *c1 = &add->cells[last];
-      if (c != c1) {
-        *c = *c1;
-        atomic_store(&c1->addr, 0, memory_order_relaxed);
-      }
-    }
-    if (add && add->size == 0) {
-      // FIXME(dvyukov): free add?
-    }
-    b->mtx.Unlock();
-  } else {
-    CHECK_EQ(addr1, h->addr_);
-    if (h->addidx_ != -1U)
-      b->mtx.ReadUnlock();
-  }
-}
+ }
+
+ template <typename T, uptr kSize>
+ void AddrHashMap<T, kSize>::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
+   if (!h->cell_)
+     return;
+   Bucket *b = h->bucket_;
+   Cell *c = h->cell_;
+   uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+   if (h->created_) {
+     // Denote completion of insertion.
+     CHECK_EQ(addr1, 0);
+     // After the following store, the element becomes available
+     // for lock-free reads.
+     atomic_store(&c->addr, h->addr_, memory_order_release);
+     b->mtx.Unlock();
+   } else if (h->remove_) {
+     // Denote that the cell is empty now.
+     CHECK_EQ(addr1, h->addr_);
+     atomic_store(&c->addr, 0, memory_order_release);
+     // See if we need to compact the bucket.
+     AddBucket *add = (AddBucket *)atomic_load(&b->add, memory_order_relaxed);
+     if (h->addidx_ == -1U) {
+       // Removed from embed array, move an add element into the freed cell.
+       if (add && add->size != 0) {
+         uptr last = --add->size;
+         Cell *c1 = &add->cells[last];
+         c->val = c1->val;
+         uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
+         atomic_store(&c->addr, addr1, memory_order_release);
+         atomic_store(&c1->addr, 0, memory_order_release);
+       }
+     } else {
+       // Removed from add array, compact it.
+       uptr last = --add->size;
+       Cell *c1 = &add->cells[last];
+       if (c != c1) {
+         *c = *c1;
+         atomic_store(&c1->addr, 0, memory_order_relaxed);
+       }
+     }
+     if (add && add->size == 0) {
+       // FIXME(dvyukov): free add?
+     }
+     b->mtx.Unlock();
+   } else {
+     CHECK_EQ(addr1, h->addr_);
+     if (h->addidx_ != -1U)
+       b->mtx.ReadUnlock();
+   }
+ }
 
 template<typename T, uptr kSize>
 uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
index eb836bc478763..0e81e6764f9a6 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -177,12 +177,12 @@ class CombinedAllocator {
 
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() {
+  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
     primary_.ForceLock();
     secondary_.ForceLock();
   }
 
-  void ForceUnlock() {
+  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
     secondary_.ForceUnlock();
     primary_.ForceUnlock();
   }

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index db30e138154ac..8bbed3c278c9a 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -312,13 +312,13 @@ class SizeClassAllocator64 {
 
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() {
+  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
     for (uptr i = 0; i < kNumClasses; i++) {
       GetRegionInfo(i)->mutex.Lock();
     }
   }
 
-  void ForceUnlock() {
+  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
     for (int i = (int)kNumClasses - 1; i >= 0; i--) {
       GetRegionInfo(i)->mutex.Unlock();
     }

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
index 61fb98742373a..dd34fe85cc3a8 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -267,13 +267,9 @@ class LargeMmapAllocator {
 
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() {
-    mutex_.Lock();
-  }
+  void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); }
 
-  void ForceUnlock() {
-    mutex_.Unlock();
-  }
+  void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); }
 
   // Iterate over all existing chunks.
   // The allocator must be locked when calling this function.

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
index 19d7202c3e145..50ccfe5589711 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
@@ -136,8 +136,8 @@ void BlockingMutex::Unlock() {
   }
 }
 
-void BlockingMutex::CheckLocked() {
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+void BlockingMutex::CheckLocked() const {
+  auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
   CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
 }
 

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
index 3d1ebbac29ba3..cc4dfd7dfe06d 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_linux.cpp
@@ -677,11 +677,11 @@ void BlockingMutex::Unlock() {
   }
 }
 
-void BlockingMutex::CheckLocked() {
-  atomic_uint32_t *m = reinterpret_cast<atomic_uint32_t *>(&opaque_storage_);
+void BlockingMutex::CheckLocked() const {
+  auto m = reinterpret_cast<atomic_uint32_t const *>(&opaque_storage_);
   CHECK_NE(MtxUnlocked, atomic_load(m, memory_order_relaxed));
 }
-#endif // !SANITIZER_SOLARIS
+#  endif  // !SANITIZER_SOLARIS
 
 // ----------------- sanitizer_linux.h
 // The actual size of this structure is specified by d_reclen.

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
index f5a28ebf05239..cf1b1250ae217 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
@@ -524,7 +524,7 @@ void BlockingMutex::Unlock() {
   OSSpinLockUnlock((OSSpinLock*)&opaque_storage_);
 }
 
-void BlockingMutex::CheckLocked() {
+void BlockingMutex::CheckLocked() const {
   CHECK_NE(*(OSSpinLock*)&opaque_storage_, 0);
 }
 

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
index 742cd6562a226..122a7a8d5a3d7 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
@@ -16,30 +16,29 @@
 #include "sanitizer_atomic.h"
 #include "sanitizer_internal_defs.h"
 #include "sanitizer_libc.h"
+#include "sanitizer_thread_safety.h"
 
 namespace __sanitizer {
 
-class StaticSpinMutex {
+class MUTEX StaticSpinMutex {
  public:
   void Init() {
     atomic_store(&state_, 0, memory_order_relaxed);
   }
 
-  void Lock() {
+  void Lock() ACQUIRE() {
     if (TryLock())
       return;
     LockSlow();
   }
 
-  bool TryLock() {
+  bool TryLock() TRY_ACQUIRE(true) {
     return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
   }
 
-  void Unlock() {
-    atomic_store(&state_, 0, memory_order_release);
-  }
+  void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
 
-  void CheckLocked() {
+  void CheckLocked() const CHECK_LOCKED {
     CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
   }
 
@@ -59,7 +58,7 @@ class StaticSpinMutex {
   }
 };
 
-class SpinMutex : public StaticSpinMutex {
+class MUTEX SpinMutex : public StaticSpinMutex {
  public:
   SpinMutex() {
     Init();
@@ -70,13 +69,13 @@ class SpinMutex : public StaticSpinMutex {
   void operator=(const SpinMutex &) = delete;
 };
 
-class BlockingMutex {
+class MUTEX BlockingMutex {
  public:
   explicit constexpr BlockingMutex(LinkerInitialized)
       : opaque_storage_ {0, }, owner_ {0} {}
   BlockingMutex();
-  void Lock();
-  void Unlock();
+  void Lock() ACQUIRE();
+  void Unlock() RELEASE();
 
   // This function does not guarantee an explicit check that the calling thread
   // is the thread which owns the mutex. This behavior, while more strictly
@@ -85,7 +84,7 @@ class BlockingMutex {
   // maintaining complex state to work around those situations, the check only
   // checks that the mutex is owned, and assumes callers to be generally
   // well-behaved.
-  void CheckLocked();
+  void CheckLocked() const CHECK_LOCKED;
 
  private:
   // Solaris mutex_t has a member that requires 64-bit alignment.
@@ -94,7 +93,7 @@ class BlockingMutex {
 };
 
 // Reader-writer spin mutex.
-class RWMutex {
+class MUTEX RWMutex {
  public:
   RWMutex() {
     atomic_store(&state_, kUnlocked, memory_order_relaxed);
@@ -104,7 +103,7 @@ class RWMutex {
     CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
   }
 
-  void Lock() {
+  void Lock() ACQUIRE() {
     u32 cmp = kUnlocked;
     if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                        memory_order_acquire))
@@ -112,27 +111,27 @@ class RWMutex {
     LockSlow();
   }
 
-  void Unlock() {
+  void Unlock() RELEASE() {
     u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
     DCHECK_NE(prev & kWriteLock, 0);
     (void)prev;
   }
 
-  void ReadLock() {
+  void ReadLock() ACQUIRE_SHARED() {
     u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
     if ((prev & kWriteLock) == 0)
       return;
     ReadLockSlow();
   }
 
-  void ReadUnlock() {
+  void ReadUnlock() RELEASE_SHARED() {
     u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
     DCHECK_EQ(prev & kWriteLock, 0);
     DCHECK_GT(prev & ~kWriteLock, 0);
     (void)prev;
   }
 
-  void CheckLocked() {
+  void CheckLocked() const CHECK_LOCKED {
     CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
   }
 
@@ -175,17 +174,14 @@ class RWMutex {
   void operator=(const RWMutex &) = delete;
 };
 
-template<typename MutexType>
-class GenericScopedLock {
+template <typename MutexType>
+class SCOPED_LOCK GenericScopedLock {
  public:
-  explicit GenericScopedLock(MutexType *mu)
-      : mu_(mu) {
+  explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
     mu_->Lock();
   }
 
-  ~GenericScopedLock() {
-    mu_->Unlock();
-  }
+  ~GenericScopedLock() RELEASE() { mu_->Unlock(); }
 
  private:
   MutexType *mu_;
@@ -194,17 +190,14 @@ class GenericScopedLock {
   void operator=(const GenericScopedLock &) = delete;
 };
 
-template<typename MutexType>
-class GenericScopedReadLock {
+template <typename MutexType>
+class SCOPED_LOCK GenericScopedReadLock {
  public:
-  explicit GenericScopedReadLock(MutexType *mu)
-      : mu_(mu) {
+  explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
     mu_->ReadLock();
   }
 
-  ~GenericScopedReadLock() {
-    mu_->ReadUnlock();
-  }
+  ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
 
  private:
   MutexType *mu_;

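One practical consequence of marking GenericScopedLock as SCOPED_LOCK
with ACQUIRE on the constructor (editorial example, not from the
commit): holding the RAII guard now satisfies REQUIRES contracts
inside its scope:

    #include "sanitizer_common/sanitizer_mutex.h"

    using namespace __sanitizer;

    SpinMutex mu;
    int state GUARDED_BY(mu);

    void MutateLocked() REQUIRES(mu) { state++; }

    void Mutate() {
      SpinMutexLock l(&mu);  // ACQUIRE(mu) in the constructor
      MutateLocked();        // OK: the analysis knows mu is held here
    }                        // RELEASE() in the destructor
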
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h b/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
index 992f23152c6ae..1a074d2bb700c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
@@ -149,7 +149,8 @@ class Quarantine {
   Cache cache_;
   char pad2_[kCacheLineSize];
 
-  void NOINLINE Recycle(uptr min_size, Callback cb) {
+  void NOINLINE Recycle(uptr min_size, Callback cb) REQUIRES(recycle_mutex_)
+      RELEASE(recycle_mutex_) {
     Cache tmp;
     {
       SpinMutexLock l(&cache_mutex_);

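Recycle's double annotation says it must be entered with
recycle_mutex_ held and returns with it released, i.e. the function
consumes the lock. A minimal hypothetical analogue:

    #include "sanitizer_common/sanitizer_mutex.h"

    using namespace __sanitizer;

    SpinMutex m;

    // Must be entered with m held; returns with m released, mirroring
    // the Recycle annotation above.
    void Drain() REQUIRES(m) RELEASE(m) {
      // ... move pending work out while still under the lock ...
      m.Unlock();
    }

    void MaybeDrain() {
      if (m.TryLock())  // TRY_ACQUIRE(true): m is held on this branch only
        Drain();        // Drain releases m, so nothing to unlock here
    }
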
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
index 8789dcd10a954..c43c26dc615f6 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_solaris.cpp
@@ -231,9 +231,7 @@ void BlockingMutex::Unlock() {
   CHECK_EQ(mutex_unlock((mutex_t *)&opaque_storage_), 0);
 }
 
-void BlockingMutex::CheckLocked() {
-  CHECK_EQ((uptr)thr_self(), owner_);
-}
+void BlockingMutex::CheckLocked() const { CHECK_EQ((uptr)thr_self(), owner_); }
 
 }  // namespace __sanitizer
 

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
index fc9135f9e2d90..ded9e3af14065 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
@@ -85,7 +85,7 @@ class ThreadContextBase {
 
 typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
 
-class ThreadRegistry {
+class MUTEX ThreadRegistry {
  public:
   ThreadRegistry(ThreadContextFactory factory);
   ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
@@ -94,9 +94,9 @@ class ThreadRegistry {
                           uptr *alive = nullptr);
   uptr GetMaxAliveThreads();
 
-  void Lock() { mtx_.Lock(); }
-  void CheckLocked() { mtx_.CheckLocked(); }
-  void Unlock() { mtx_.Unlock(); }
+  void Lock() ACQUIRE() { mtx_.Lock(); }
+  void CheckLocked() const CHECK_LOCKED { mtx_.CheckLocked(); }
+  void Unlock() RELEASE() { mtx_.Unlock(); }
 
   // Should be guarded by ThreadRegistryLock.
   ThreadContextBase *GetThreadLocked(u32 tid) {

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h b/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
new file mode 100644
index 0000000000000..949b98f4c1ad0
--- /dev/null
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
@@ -0,0 +1,42 @@
+//===-- sanitizer_thread_safety.h -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is shared between sanitizer tools.
+//
+// Wrappers around thread safety annotations.
+// https://clang.llvm.org/docs/ThreadSafetyAnalysis.html
+//===----------------------------------------------------------------------===//
+
+#ifndef SANITIZER_THREAD_SAFETY_H
+#define SANITIZER_THREAD_SAFETY_H
+
+#if defined(__clang__)
+#  define THREAD_ANNOTATION(x) __attribute__((x))
+#else
+#  define THREAD_ANNOTATION(x)
+#endif
+
+#define MUTEX THREAD_ANNOTATION(capability("mutex"))
+#define SCOPED_LOCK THREAD_ANNOTATION(scoped_lockable)
+#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION(pt_guarded_by(x))
+#define REQUIRES(...) THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
+#define REQUIRES_SHARED(...) \
+  THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
+#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
+#define ACQUIRE_SHARED(...) \
+  THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
+#define TRY_ACQUIRE(...) THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
+#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))
+#define RELEASE_SHARED(...) \
+  THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
+#define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
+#define CHECK_LOCKED THREAD_ANNOTATION(assert_capability(this))
+#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis)
+
+#endif

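Two details of the new header worth noting (my reading of it):
outside Clang every macro expands to nothing, and CHECK_LOCKED
expands to assert_capability(this), so a runtime CheckLocked() call
doubles as a static promise to the analysis that the mutex is held
from that point on. A hypothetical usage sketch:

    #include "sanitizer_common/sanitizer_mutex.h"

    using namespace __sanitizer;

    SpinMutex mu;
    int val GUARDED_BY(mu);

    void UseAfterCheck() {
      // CheckLocked() is both a runtime CHECK and, via assert_capability,
      // an assertion to the analysis that mu is held here.
      mu.CheckLocked();
      val++;  // no warning
    }
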
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
index fd5dc94b7d91f..93700bde04a96 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
@@ -832,9 +832,7 @@ void BlockingMutex::Unlock() {
   ReleaseSRWLockExclusive((PSRWLOCK)opaque_storage_);
 }
 
-void BlockingMutex::CheckLocked() {
-  CHECK_EQ(owner_, GetThreadSelf());
-}
+void BlockingMutex::CheckLocked() const { CHECK_EQ(owner_, GetThreadSelf()); }
 
 uptr GetTlsSize() {
   return 0;

diff --git a/compiler-rt/lib/scudo/scudo_allocator.cpp b/compiler-rt/lib/scudo/scudo_allocator.cpp
index d8a9a9cc5f90b..172353fadb1f8 100644
--- a/compiler-rt/lib/scudo/scudo_allocator.cpp
+++ b/compiler-rt/lib/scudo/scudo_allocator.cpp
@@ -300,7 +300,7 @@ struct Allocator {
 
   // Allocates a chunk.
   void *allocate(uptr Size, uptr Alignment, AllocType Type,
-                 bool ForceZeroContents = false) {
+                 bool ForceZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
     initThreadMaybe();
 
     if (UNLIKELY(Alignment > MaxAlignment)) {
@@ -405,7 +405,7 @@ struct Allocator {
   // a zero-sized quarantine, or if the size of the chunk is greater than the
   // quarantine chunk size threshold.
   void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
-                                   uptr Size) {
+                                   uptr Size) NO_THREAD_SAFETY_ANALYSIS {
     const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
     if (BypassQuarantine) {
       UnpackedHeader NewHeader = *Header;

diff --git a/compiler-rt/lib/scudo/scudo_tsd.h b/compiler-rt/lib/scudo/scudo_tsd.h
index ec8dabc1f8a7d..e1310974db450 100644
--- a/compiler-rt/lib/scudo/scudo_tsd.h
+++ b/compiler-rt/lib/scudo/scudo_tsd.h
@@ -29,7 +29,7 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
   void init();
   void commitBack();
 
-  inline bool tryLock() {
+  inline bool tryLock() TRY_ACQUIRE(true, Mutex) {
     if (Mutex.TryLock()) {
       atomic_store_relaxed(&Precedence, 0);
       return true;
@@ -40,12 +40,12 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
     return false;
   }
 
-  inline void lock() {
+  inline void lock() ACQUIRE(Mutex) {
     atomic_store_relaxed(&Precedence, 0);
     Mutex.Lock();
   }
 
-  inline void unlock() { Mutex.Unlock(); }
+  inline void unlock() RELEASE(Mutex) { Mutex.Unlock(); }
 
   inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
 

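TRY_ACQUIRE(true, Mutex) tells the analysis that a true return means
Mutex is now held, so callers must branch on the result. A reduced
hypothetical caller:

    #include "sanitizer_common/sanitizer_mutex.h"

    using namespace __sanitizer;

    struct TSD {
      SpinMutex Mutex;
      bool tryLock() TRY_ACQUIRE(true, Mutex) {
        if (Mutex.TryLock())
          return true;  // true return means Mutex is now held
        return false;
      }
      void unlock() RELEASE(Mutex) { Mutex.Unlock(); }
    };

    void Use(TSD *t) {
      if (t->tryLock()) {
        // ... fast path: the analysis knows t->Mutex is held here ...
        t->unlock();
      }
      // On the false branch Mutex is not held; unlocking here would warn.
    }
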
diff --git a/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc b/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
index 08e4d3af7316e..6e781c11cc7a5 100644
--- a/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
+++ b/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
@@ -34,7 +34,7 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
   initThread(MinimalInit);
 }
 
-ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) {
+ALWAYS_INLINE ScudoTSD *getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
   if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
     FallbackTSD.lock();
     *UnlockRequired = true;

diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index d3fde2ad24c4e..e704cbde1dbe9 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -531,7 +531,7 @@ int Finalize(ThreadState *thr) {
 }
 
 #if !SANITIZER_GO
-void ForkBefore(ThreadState *thr, uptr pc) {
+void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   ctx->thread_registry->Lock();
   ctx->report_mtx.Lock();
   // Suppress all reports in the pthread_atfork callbacks.
@@ -545,14 +545,14 @@ void ForkBefore(ThreadState *thr, uptr pc) {
   thr->ignore_interceptors++;
 }
 
-void ForkParentAfter(ThreadState *thr, uptr pc) {
+void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
   ctx->report_mtx.Unlock();
   ctx->thread_registry->Unlock();
 }
 
-void ForkChildAfter(ThreadState *thr, uptr pc) {
+void ForkChildAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
   ctx->report_mtx.Unlock();
