[compiler-rt] 765921d - sanitizer_common: prefix thread-safety macros with SANITIZER_

Dmitry Vyukov via llvm-commits llvm-commits at lists.llvm.org
Fri Jan 7 06:11:25 PST 2022


Author: Dmitry Vyukov
Date: 2022-01-07T15:11:00+01:00
New Revision: 765921de5b8c4ff048fa951bbd11be436289ec6b

URL: https://github.com/llvm/llvm-project/commit/765921de5b8c4ff048fa951bbd11be436289ec6b
DIFF: https://github.com/llvm/llvm-project/commit/765921de5b8c4ff048fa951bbd11be436289ec6b.diff

LOG: sanitizer_common: prefix thread-safety macros with SANITIZER_

Currently we use very common names for macros like ACQUIRE/RELEASE,
which cause conflicts with system headers.
Prefix all macros with SANITIZER_ to avoid conflicts.

Reviewed By: vitalybuka

Differential Revision: https://reviews.llvm.org/D116652

Added: 
    

Modified: 
    compiler-rt/lib/asan/asan_allocator.cpp
    compiler-rt/lib/cfi/cfi.cpp
    compiler-rt/lib/lsan/lsan_common.h
    compiler-rt/lib/memprof/memprof_allocator.cpp
    compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
    compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
    compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
    compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
    compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
    compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
    compiler-rt/lib/sanitizer_common/sanitizer_common.h
    compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
    compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
    compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
    compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
    compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
    compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
    compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
    compiler-rt/lib/scudo/scudo_allocator.cpp
    compiler-rt/lib/scudo/scudo_tsd.h
    compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
    compiler-rt/lib/scudo/scudo_tsd_shared.cpp
    compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp
    compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp
    compiler-rt/lib/tsan/rtl/tsan_mman.cpp
    compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
    compiler-rt/lib/tsan/rtl/tsan_rtl.h
    compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
index 1ff7091460ad6..f9f1cfcd9f87d 100644
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -840,12 +840,12 @@ struct Allocator {
     quarantine.PrintStats();
   }
 
-  void ForceLock() ACQUIRE(fallback_mutex) {
+  void ForceLock() SANITIZER_ACQUIRE(fallback_mutex) {
     allocator.ForceLock();
     fallback_mutex.Lock();
   }
 
-  void ForceUnlock() RELEASE(fallback_mutex) {
+  void ForceUnlock() SANITIZER_RELEASE(fallback_mutex) {
     fallback_mutex.Unlock();
     allocator.ForceUnlock();
   }
@@ -1054,9 +1054,11 @@ uptr asan_mz_size(const void *ptr) {
   return instance.AllocationSize(reinterpret_cast<uptr>(ptr));
 }
 
-void asan_mz_force_lock() NO_THREAD_SAFETY_ANALYSIS { instance.ForceLock(); }
+void asan_mz_force_lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+  instance.ForceLock();
+}
 
-void asan_mz_force_unlock() NO_THREAD_SAFETY_ANALYSIS {
+void asan_mz_force_unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   instance.ForceUnlock();
 }
 

diff --git a/compiler-rt/lib/cfi/cfi.cpp b/compiler-rt/lib/cfi/cfi.cpp
index 65a10c999cc6b..22f0b175dd87d 100644
--- a/compiler-rt/lib/cfi/cfi.cpp
+++ b/compiler-rt/lib/cfi/cfi.cpp
@@ -322,14 +322,14 @@ void InitShadow() {
 THREADLOCAL int in_loader;
 Mutex shadow_update_lock;
 
-void EnterLoader() NO_THREAD_SAFETY_ANALYSIS {
+void EnterLoader() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   if (in_loader == 0) {
     shadow_update_lock.Lock();
   }
   ++in_loader;
 }
 
-void ExitLoader() NO_THREAD_SAFETY_ANALYSIS {
+void ExitLoader() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   CHECK(in_loader > 0);
   --in_loader;
   UpdateShadow();

diff --git a/compiler-rt/lib/lsan/lsan_common.h b/compiler-rt/lib/lsan/lsan_common.h
index 61b64d4dc30ff..6b06c4517cd5d 100644
--- a/compiler-rt/lib/lsan/lsan_common.h
+++ b/compiler-rt/lib/lsan/lsan_common.h
@@ -230,8 +230,8 @@ void UnlockAllocator();
 // Returns true if [addr, addr + sizeof(void *)) is poisoned.
 bool WordIsPoisoned(uptr addr);
 // Wrappers for ThreadRegistry access.
-void LockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
-void UnlockThreadRegistry() NO_THREAD_SAFETY_ANALYSIS;
+void LockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+void UnlockThreadRegistry() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
 
 struct ScopedStopTheWorldLock {
   ScopedStopTheWorldLock() {

diff --git a/compiler-rt/lib/memprof/memprof_allocator.cpp b/compiler-rt/lib/memprof/memprof_allocator.cpp
index adbdf365bc4c4..0974b898666b3 100644
--- a/compiler-rt/lib/memprof/memprof_allocator.cpp
+++ b/compiler-rt/lib/memprof/memprof_allocator.cpp
@@ -524,12 +524,12 @@ struct Allocator {
 
   void PrintStats() { allocator.PrintStats(); }
 
-  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     allocator.ForceLock();
     fallback_mutex.Lock();
   }
 
-  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     fallback_mutex.Unlock();
     allocator.ForceUnlock();
   }

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h b/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
index 7e2fa91089f13..fe48b9caf0670 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_addrhashmap.h
@@ -201,7 +201,8 @@ AddrHashMap<T, kSize>::AddrHashMap() {
 }
 
 template <typename T, uptr kSize>
-void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
+void AddrHashMap<T, kSize>::acquire(Handle *h)
+    SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   uptr addr = h->addr_;
   uptr hash = calcHash(addr);
   Bucket *b = &table_[hash];
@@ -330,7 +331,8 @@ void AddrHashMap<T, kSize>::acquire(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
  }
 
  template <typename T, uptr kSize>
- void AddrHashMap<T, kSize>::release(Handle *h) NO_THREAD_SAFETY_ANALYSIS {
+ void AddrHashMap<T, kSize>::release(Handle *h)
+     SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
    if (!h->cell_)
      return;
    Bucket *b = h->bucket_;

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
index c5a5fb7371dd9..25a43a59f0475 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator.cpp
@@ -126,12 +126,12 @@ void InternalFree(void *addr, InternalAllocatorCache *cache) {
   RawInternalFree(addr, cache);
 }
 
-void InternalAllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
+void InternalAllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   internal_allocator_cache_mu.Lock();
   internal_allocator()->ForceLock();
 }
 
-void InternalAllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+void InternalAllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   internal_allocator()->ForceUnlock();
   internal_allocator_cache_mu.Unlock();
 }

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
index 9a3602f730b30..b92cfa5bf4c4b 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -175,12 +175,12 @@ class CombinedAllocator {
 
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     primary_.ForceLock();
     secondary_.ForceLock();
   }
 
-  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     secondary_.ForceUnlock();
     primary_.ForceUnlock();
   }

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
index ae1b7e0d5f1c4..f2471efced613 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -238,13 +238,13 @@ class SizeClassAllocator32 {
 
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     for (uptr i = 0; i < kNumClasses; i++) {
       GetSizeClassInfo(i)->mutex.Lock();
     }
   }
 
-  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     for (int i = kNumClasses - 1; i >= 0; i--) {
       GetSizeClassInfo(i)->mutex.Unlock();
     }

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
index f917310cfebb4..66ba71d325dad 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_primary64.h
@@ -354,13 +354,13 @@ class SizeClassAllocator64 {
 
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     for (uptr i = 0; i < kNumClasses; i++) {
       GetRegionInfo(i)->mutex.Lock();
     }
   }
 
-  void ForceUnlock() NO_THREAD_SAFETY_ANALYSIS {
+  void ForceUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     for (int i = (int)kNumClasses - 1; i >= 0; i--) {
       GetRegionInfo(i)->mutex.Unlock();
     }

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
index c24354cb5b2ae..48afb2a298341 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_secondary.h
@@ -267,9 +267,9 @@ class LargeMmapAllocator {
 
   // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
   // introspection API.
-  void ForceLock() ACQUIRE(mutex_) { mutex_.Lock(); }
+  void ForceLock() SANITIZER_ACQUIRE(mutex_) { mutex_.Lock(); }
 
-  void ForceUnlock() RELEASE(mutex_) { mutex_.Unlock(); }
+  void ForceUnlock() SANITIZER_RELEASE(mutex_) { mutex_.Unlock(); }
 
   // Iterate over all existing chunks.
   // The allocator must be locked when calling this function.

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index 9ddb099a8dbc6..139d5a0666646 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -238,12 +238,12 @@ void SetPrintfAndReportCallback(void (*callback)(const char *));
 // Lock sanitizer error reporting and protects against nested errors.
 class ScopedErrorReportLock {
  public:
-  ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); }
-  ~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); }
+  ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); }
+  ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); }
 
-  static void Lock() ACQUIRE(mutex_);
-  static void Unlock() RELEASE(mutex_);
-  static void CheckLocked() CHECK_LOCKED(mutex_);
+  static void Lock() SANITIZER_ACQUIRE(mutex_);
+  static void Unlock() SANITIZER_RELEASE(mutex_);
+  static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_);
 
  private:
   static atomic_uintptr_t reporting_thread_;

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
index 5ec6efaa6490c..c16f5cdc1d717 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
@@ -20,25 +20,27 @@
 
 namespace __sanitizer {
 
-class MUTEX StaticSpinMutex {
+class SANITIZER_MUTEX StaticSpinMutex {
  public:
   void Init() {
     atomic_store(&state_, 0, memory_order_relaxed);
   }
 
-  void Lock() ACQUIRE() {
+  void Lock() SANITIZER_ACQUIRE() {
     if (LIKELY(TryLock()))
       return;
     LockSlow();
   }
 
-  bool TryLock() TRY_ACQUIRE(true) {
+  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
     return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
   }
 
-  void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
+  void Unlock() SANITIZER_RELEASE() {
+    atomic_store(&state_, 0, memory_order_release);
+  }
 
-  void CheckLocked() const CHECK_LOCKED() {
+  void CheckLocked() const SANITIZER_CHECK_LOCKED() {
     CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
   }
 
@@ -48,7 +50,7 @@ class MUTEX StaticSpinMutex {
   void LockSlow();
 };
 
-class MUTEX SpinMutex : public StaticSpinMutex {
+class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
  public:
   SpinMutex() {
     Init();
@@ -156,12 +158,12 @@ class CheckedMutex {
 // Derive from CheckedMutex for the purposes of EBO.
 // We could make it a field marked with [[no_unique_address]],
 // but this attribute is not supported by some older compilers.
-class MUTEX Mutex : CheckedMutex {
+class SANITIZER_MUTEX Mutex : CheckedMutex {
  public:
   explicit constexpr Mutex(MutexType type = MutexUnchecked)
       : CheckedMutex(type) {}
 
-  void Lock() ACQUIRE() {
+  void Lock() SANITIZER_ACQUIRE() {
     CheckedMutex::Lock();
     u64 reset_mask = ~0ull;
     u64 state = atomic_load_relaxed(&state_);
@@ -206,7 +208,7 @@ class MUTEX Mutex : CheckedMutex {
     }
   }
 
-  void Unlock() RELEASE() {
+  void Unlock() SANITIZER_RELEASE() {
     CheckedMutex::Unlock();
     bool wake_writer;
     u64 wake_readers;
@@ -234,7 +236,7 @@ class MUTEX Mutex : CheckedMutex {
       readers_.Post(wake_readers);
   }
 
-  void ReadLock() ACQUIRE_SHARED() {
+  void ReadLock() SANITIZER_ACQUIRE_SHARED() {
     CheckedMutex::Lock();
     u64 reset_mask = ~0ull;
     u64 state = atomic_load_relaxed(&state_);
@@ -271,7 +273,7 @@ class MUTEX Mutex : CheckedMutex {
     }
   }
 
-  void ReadUnlock() RELEASE_SHARED() {
+  void ReadUnlock() SANITIZER_RELEASE_SHARED() {
     CheckedMutex::Unlock();
     bool wake;
     u64 new_state;
@@ -297,13 +299,13 @@ class MUTEX Mutex : CheckedMutex {
   // owns the mutex but a child checks that it is locked. Rather than
   // maintaining complex state to work around those situations, the check only
   // checks that the mutex is owned.
-  void CheckWriteLocked() const CHECK_LOCKED() {
+  void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
     CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
   }
 
-  void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }
+  void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }
 
-  void CheckReadLocked() const CHECK_LOCKED() {
+  void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
     CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
   }
 
@@ -361,13 +363,13 @@ void FutexWait(atomic_uint32_t *p, u32 cmp);
 void FutexWake(atomic_uint32_t *p, u32 count);
 
 template <typename MutexType>
-class SCOPED_LOCK GenericScopedLock {
+class SANITIZER_SCOPED_LOCK GenericScopedLock {
  public:
-  explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
+  explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
     mu_->Lock();
   }
 
-  ~GenericScopedLock() RELEASE() { mu_->Unlock(); }
+  ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }
 
  private:
   MutexType *mu_;
@@ -377,13 +379,14 @@ class SCOPED_LOCK GenericScopedLock {
 };
 
 template <typename MutexType>
-class SCOPED_LOCK GenericScopedReadLock {
+class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
  public:
-  explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
+  explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
+      : mu_(mu) {
     mu_->ReadLock();
   }
 
-  ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
+  ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }
 
  private:
   MutexType *mu_;
@@ -393,10 +396,10 @@ class SCOPED_LOCK GenericScopedReadLock {
 };
 
 template <typename MutexType>
-class SCOPED_LOCK GenericScopedRWLock {
+class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
  public:
   ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
-      ACQUIRE(mu)
+      SANITIZER_ACQUIRE(mu)
       : mu_(mu), write_(write) {
     if (write_)
       mu_->Lock();
@@ -404,7 +407,7 @@ class SCOPED_LOCK GenericScopedRWLock {
       mu_->ReadLock();
   }
 
-  ALWAYS_INLINE ~GenericScopedRWLock() RELEASE() {
+  ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
     if (write_)
       mu_->Unlock();
     else

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
index 349cdbcda6d98..82048f0eae2e3 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_platform_limits_posix.cpp
@@ -170,13 +170,9 @@ typedef struct user_fpregs elf_fpregset_t;
 #endif
 
 // Include these after system headers to avoid name clashes and ambiguities.
-#include "sanitizer_internal_defs.h"
-#include "sanitizer_platform_limits_posix.h"
-
-// To prevent macro redefinition warning between our sanitizer_thread_safety.h
-// and system's scsi.h.
-#  undef RELEASE
 #  include "sanitizer_common.h"
+#  include "sanitizer_internal_defs.h"
+#  include "sanitizer_platform_limits_posix.h"
 
 namespace __sanitizer {
   unsigned struct_utsname_sz = sizeof(struct utsname);

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h b/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
index 1a074d2bb700c..4aa6054851666 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_quarantine.h
@@ -149,8 +149,8 @@ class Quarantine {
   Cache cache_;
   char pad2_[kCacheLineSize];
 
-  void NOINLINE Recycle(uptr min_size, Callback cb) REQUIRES(recycle_mutex_)
-      RELEASE(recycle_mutex_) {
+  void NOINLINE Recycle(uptr min_size, Callback cb)
+      SANITIZER_REQUIRES(recycle_mutex_) SANITIZER_RELEASE(recycle_mutex_) {
     Cache tmp;
     {
       SpinMutexLock l(&cache_mutex_);

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
index 1bfad811f712d..4f1a8caac6ed8 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
@@ -97,7 +97,7 @@ class StackStore {
       Packed,
       Unpacked,
     };
-    State state GUARDED_BY(mtx_);
+    State state SANITIZER_GUARDED_BY(mtx_);
 
     uptr *Create(StackStore *store);
 
@@ -109,8 +109,8 @@ class StackStore {
     void TestOnlyUnmap(StackStore *store);
     bool Stored(uptr n);
     bool IsPacked() const;
-    void Lock() NO_THREAD_SAFETY_ANALYSIS { mtx_.Lock(); }
-    void Unlock() NO_THREAD_SAFETY_ANALYSIS { mtx_.Unlock(); }
+    void Lock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Lock(); }
+    void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS { mtx_.Unlock(); }
   };
 
   BlockInfo blocks_[kBlockCount] = {};

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
index c755b1829d2a3..ac87fab3eaf1f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -94,8 +94,8 @@ class CompressThread {
   constexpr CompressThread() = default;
   void NewWorkNotify();
   void Stop();
-  void LockAndStop() NO_THREAD_SAFETY_ANALYSIS;
-  void Unlock() NO_THREAD_SAFETY_ANALYSIS;
+  void LockAndStop() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
+  void Unlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS;
 
  private:
   enum class State {
@@ -114,8 +114,8 @@ class CompressThread {
 
   Semaphore semaphore_ = {};
   StaticSpinMutex mutex_ = {};
-  State state_ GUARDED_BY(mutex_) = State::NotStarted;
-  void *thread_ GUARDED_BY(mutex_) = nullptr;
+  State state_ SANITIZER_GUARDED_BY(mutex_) = State::NotStarted;
+  void *thread_ SANITIZER_GUARDED_BY(mutex_) = nullptr;
   atomic_uint8_t run_ = {};
 };
 

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
index 9975d78ec0bb2..2c7e5c276fa1c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_thread_registry.h
@@ -86,7 +86,7 @@ class ThreadContextBase {
 
 typedef ThreadContextBase* (*ThreadContextFactory)(u32 tid);
 
-class MUTEX ThreadRegistry {
+class SANITIZER_MUTEX ThreadRegistry {
  public:
   ThreadRegistry(ThreadContextFactory factory);
   ThreadRegistry(ThreadContextFactory factory, u32 max_threads,
@@ -95,9 +95,9 @@ class MUTEX ThreadRegistry {
                           uptr *alive = nullptr);
   uptr GetMaxAliveThreads();
 
-  void Lock() ACQUIRE() { mtx_.Lock(); }
-  void CheckLocked() const CHECK_LOCKED() { mtx_.CheckLocked(); }
-  void Unlock() RELEASE() { mtx_.Unlock(); }
+  void Lock() SANITIZER_ACQUIRE() { mtx_.Lock(); }
+  void CheckLocked() const SANITIZER_CHECK_LOCKED() { mtx_.CheckLocked(); }
+  void Unlock() SANITIZER_RELEASE() { mtx_.Unlock(); }
 
   // Should be guarded by ThreadRegistryLock.
   ThreadContextBase *GetThreadLocked(u32 tid) {

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h b/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
index 52b25edaa7a3e..c34ea804da201 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_thread_safety.h
@@ -16,27 +16,34 @@
 #define SANITIZER_THREAD_SAFETY_H
 
 #if defined(__clang__)
-#  define THREAD_ANNOTATION(x) __attribute__((x))
+#  define SANITIZER_THREAD_ANNOTATION(x) __attribute__((x))
 #else
-#  define THREAD_ANNOTATION(x)
+#  define SANITIZER_THREAD_ANNOTATION(x)
 #endif
 
-#define MUTEX THREAD_ANNOTATION(capability("mutex"))
-#define SCOPED_LOCK THREAD_ANNOTATION(scoped_lockable)
-#define GUARDED_BY(x) THREAD_ANNOTATION(guarded_by(x))
-#define PT_GUARDED_BY(x) THREAD_ANNOTATION(pt_guarded_by(x))
-#define REQUIRES(...) THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
-#define REQUIRES_SHARED(...) \
-  THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
-#define ACQUIRE(...) THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
-#define ACQUIRE_SHARED(...) \
-  THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
-#define TRY_ACQUIRE(...) THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
-#define RELEASE(...) THREAD_ANNOTATION(release_capability(__VA_ARGS__))
-#define RELEASE_SHARED(...) \
-  THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
-#define EXCLUDES(...) THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
-#define CHECK_LOCKED(...) THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
-#define NO_THREAD_SAFETY_ANALYSIS THREAD_ANNOTATION(no_thread_safety_analysis)
+#define SANITIZER_MUTEX SANITIZER_THREAD_ANNOTATION(capability("mutex"))
+#define SANITIZER_SCOPED_LOCK SANITIZER_THREAD_ANNOTATION(scoped_lockable)
+#define SANITIZER_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(guarded_by(x))
+#define SANITIZER_PT_GUARDED_BY(x) SANITIZER_THREAD_ANNOTATION(pt_guarded_by(x))
+#define SANITIZER_REQUIRES(...) \
+  SANITIZER_THREAD_ANNOTATION(requires_capability(__VA_ARGS__))
+#define SANITIZER_REQUIRES_SHARED(...) \
+  SANITIZER_THREAD_ANNOTATION(requires_shared_capability(__VA_ARGS__))
+#define SANITIZER_ACQUIRE(...) \
+  SANITIZER_THREAD_ANNOTATION(acquire_capability(__VA_ARGS__))
+#define SANITIZER_ACQUIRE_SHARED(...) \
+  SANITIZER_THREAD_ANNOTATION(acquire_shared_capability(__VA_ARGS__))
+#define SANITIZER_TRY_ACQUIRE(...) \
+  SANITIZER_THREAD_ANNOTATION(try_acquire_capability(__VA_ARGS__))
+#define SANITIZER_RELEASE(...) \
+  SANITIZER_THREAD_ANNOTATION(release_capability(__VA_ARGS__))
+#define SANITIZER_RELEASE_SHARED(...) \
+  SANITIZER_THREAD_ANNOTATION(release_shared_capability(__VA_ARGS__))
+#define SANITIZER_EXCLUDES(...) \
+  SANITIZER_THREAD_ANNOTATION(locks_excluded(__VA_ARGS__))
+#define SANITIZER_CHECK_LOCKED(...) \
+  SANITIZER_THREAD_ANNOTATION(assert_capability(__VA_ARGS__))
+#define SANITIZER_NO_THREAD_SAFETY_ANALYSIS \
+  SANITIZER_THREAD_ANNOTATION(no_thread_safety_analysis)
 
 #endif

diff --git a/compiler-rt/lib/scudo/scudo_allocator.cpp b/compiler-rt/lib/scudo/scudo_allocator.cpp
index 172353fadb1f8..5b6ac8b354934 100644
--- a/compiler-rt/lib/scudo/scudo_allocator.cpp
+++ b/compiler-rt/lib/scudo/scudo_allocator.cpp
@@ -299,8 +299,9 @@ struct Allocator {
   NOINLINE bool isRssLimitExceeded();
 
   // Allocates a chunk.
-  void *allocate(uptr Size, uptr Alignment, AllocType Type,
-                 bool ForceZeroContents = false) NO_THREAD_SAFETY_ANALYSIS {
+  void *
+  allocate(uptr Size, uptr Alignment, AllocType Type,
+           bool ForceZeroContents = false) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     initThreadMaybe();
 
     if (UNLIKELY(Alignment > MaxAlignment)) {
@@ -404,8 +405,8 @@ struct Allocator {
   // Place a chunk in the quarantine or directly deallocate it in the event of
   // a zero-sized quarantine, or if the size of the chunk is greater than the
   // quarantine chunk size threshold.
-  void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header,
-                                   uptr Size) NO_THREAD_SAFETY_ANALYSIS {
+  void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header, uptr Size)
+      SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
     const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
     if (BypassQuarantine) {
       UnpackedHeader NewHeader = *Header;

diff --git a/compiler-rt/lib/scudo/scudo_tsd.h b/compiler-rt/lib/scudo/scudo_tsd.h
index e1310974db450..eef4a7ba1e656 100644
--- a/compiler-rt/lib/scudo/scudo_tsd.h
+++ b/compiler-rt/lib/scudo/scudo_tsd.h
@@ -29,7 +29,7 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
   void init();
   void commitBack();
 
-  inline bool tryLock() TRY_ACQUIRE(true, Mutex) {
+  inline bool tryLock() SANITIZER_TRY_ACQUIRE(true, Mutex) {
     if (Mutex.TryLock()) {
       atomic_store_relaxed(&Precedence, 0);
       return true;
@@ -40,12 +40,12 @@ struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
     return false;
   }
 
-  inline void lock() ACQUIRE(Mutex) {
+  inline void lock() SANITIZER_ACQUIRE(Mutex) {
     atomic_store_relaxed(&Precedence, 0);
     Mutex.Lock();
   }
 
-  inline void unlock() RELEASE(Mutex) { Mutex.Unlock(); }
+  inline void unlock() SANITIZER_RELEASE(Mutex) { Mutex.Unlock(); }
 
   inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
 

diff --git a/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc b/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
index 29db8a2eff1a0..dc4d982f2fa8b 100644
--- a/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
+++ b/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
@@ -34,7 +34,7 @@ ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
 }
 
 ALWAYS_INLINE ScudoTSD *
-getTSDAndLock(bool *UnlockRequired) NO_THREAD_SAFETY_ANALYSIS {
+getTSDAndLock(bool *UnlockRequired) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
     FallbackTSD.lock();
     *UnlockRequired = true;

diff --git a/compiler-rt/lib/scudo/scudo_tsd_shared.cpp b/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
index fd85a7c4017f8..fc691b21a2135 100644
--- a/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
+++ b/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
@@ -64,7 +64,7 @@ void initThread(bool MinimalInit) {
   setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
 }
 
-ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) NO_THREAD_SAFETY_ANALYSIS {
+ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   if (NumberOfTSDs > 1) {
     // Use the Precedence of the current TSD as our random seed. Since we are in
     // the slow path, it means that tryLock failed, and as a result it's very

diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp
index 75044c38d5d23..86a3dcd332b23 100644
--- a/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_mman.cpp
@@ -124,13 +124,13 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
   gp->mtx.Unlock();
 }
 
-void AllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
+void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   global_proc()->mtx.Lock();
   global_proc()->internal_alloc_mtx.Lock();
   InternalAllocatorLock();
 }
 
-void AllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   InternalAllocatorUnlock();
   global_proc()->internal_alloc_mtx.Unlock();
   global_proc()->mtx.Unlock();

diff --git a/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp
index c14af9788e32d..5b46d5f5e2bc7 100644
--- a/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl-old/tsan_rtl.cpp
@@ -521,7 +521,7 @@ int Finalize(ThreadState *thr) {
 }
 
 #if !SANITIZER_GO
-void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+void ForkBefore(ThreadState *thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   ctx->thread_registry.Lock();
   ctx->report_mtx.Lock();
   ScopedErrorReportLock::Lock();
@@ -543,7 +543,8 @@ void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   __tsan_test_only_on_fork();
 }
 
-void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+void ForkParentAfter(ThreadState *thr,
+                     uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
   thr->ignore_reads_and_writes--;
@@ -554,7 +555,7 @@ void ForkParentAfter(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
 }
 
 void ForkChildAfter(ThreadState *thr, uptr pc,
-                    bool start_thread) NO_THREAD_SAFETY_ANALYSIS {
+                    bool start_thread) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
   thr->ignore_reads_and_writes--;

diff --git a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
index 7a72efb12263a..00cc3a306fd3e 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_mman.cpp
@@ -124,21 +124,21 @@ ScopedGlobalProcessor::~ScopedGlobalProcessor() {
   gp->mtx.Unlock();
 }
 
-void AllocatorLock() NO_THREAD_SAFETY_ANALYSIS {
+void AllocatorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   global_proc()->internal_alloc_mtx.Lock();
   InternalAllocatorLock();
 }
 
-void AllocatorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+void AllocatorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   InternalAllocatorUnlock();
   global_proc()->internal_alloc_mtx.Unlock();
 }
 
-void GlobalProcessorLock() NO_THREAD_SAFETY_ANALYSIS {
+void GlobalProcessorLock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   global_proc()->mtx.Lock();
 }
 
-void GlobalProcessorUnlock() NO_THREAD_SAFETY_ANALYSIS {
+void GlobalProcessorUnlock() SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   global_proc()->mtx.Unlock();
 }
 

diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index ed60e250cff80..c068d8e486b0b 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -113,7 +113,7 @@ static TracePart* TracePartAlloc(ThreadState* thr) {
   return part;
 }
 
-static void TracePartFree(TracePart* part) REQUIRES(ctx->slot_mtx) {
+static void TracePartFree(TracePart* part) SANITIZER_REQUIRES(ctx->slot_mtx) {
   DCHECK(part->trace);
   part->trace = nullptr;
   ctx->trace_part_recycle.PushFront(part);
@@ -208,7 +208,7 @@ static void DoResetImpl(uptr epoch) {
 
 // Clang does not understand locking all slots in the loop:
 // error: expecting mutex 'slot.mtx' to be held at start of each loop
-void DoReset(ThreadState* thr, uptr epoch) NO_THREAD_SAFETY_ANALYSIS {
+void DoReset(ThreadState* thr, uptr epoch) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   {
     for (auto& slot : ctx->slots) {
       slot.mtx.Lock();
@@ -230,7 +230,7 @@ void DoReset(ThreadState* thr, uptr epoch) NO_THREAD_SAFETY_ANALYSIS {
 void FlushShadowMemory() { DoReset(nullptr, 0); }
 
 static TidSlot* FindSlotAndLock(ThreadState* thr)
-    ACQUIRE(thr->slot->mtx) NO_THREAD_SAFETY_ANALYSIS {
+    SANITIZER_ACQUIRE(thr->slot->mtx) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   CHECK(!thr->slot);
   TidSlot* slot = nullptr;
   for (;;) {
@@ -334,7 +334,7 @@ void SlotDetach(ThreadState* thr) {
   SlotDetachImpl(thr, true);
 }
 
-void SlotLock(ThreadState* thr) NO_THREAD_SAFETY_ANALYSIS {
+void SlotLock(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   DCHECK(!thr->slot_locked);
 #if SANITIZER_DEBUG
   // Check these mutexes are not locked.
@@ -756,7 +756,7 @@ int Finalize(ThreadState *thr) {
 }
 
 #if !SANITIZER_GO
-void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
+void ForkBefore(ThreadState* thr, uptr pc) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   GlobalProcessorLock();
   // Detaching from the slot makes OnUserFree skip writing to the shadow.
   // The slot will be locked so any attempts to use it will deadlock anyway.
@@ -783,7 +783,7 @@ void ForkBefore(ThreadState *thr, uptr pc) NO_THREAD_SAFETY_ANALYSIS {
   __tsan_test_only_on_fork();
 }
 
-static void ForkAfter(ThreadState* thr) NO_THREAD_SAFETY_ANALYSIS {
+static void ForkAfter(ThreadState* thr) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   thr->suppress_reports--;  // Enabled in ForkBefore.
   thr->ignore_interceptors--;
   thr->ignore_reads_and_writes--;

diff  --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
index d06358b462eb1..fbf02806e34b7 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -332,12 +332,12 @@ struct Context {
   Mutex slot_mtx;
   uptr global_epoch;  // guarded by slot_mtx and by all slot mutexes
   bool resetting;     // global reset is in progress
-  IList<TidSlot, &TidSlot::node> slot_queue GUARDED_BY(slot_mtx);
+  IList<TidSlot, &TidSlot::node> slot_queue SANITIZER_GUARDED_BY(slot_mtx);
   IList<TraceHeader, &TraceHeader::global, TracePart> trace_part_recycle
-      GUARDED_BY(slot_mtx);
-  uptr trace_part_total_allocated GUARDED_BY(slot_mtx);
-  uptr trace_part_recycle_finished GUARDED_BY(slot_mtx);
-  uptr trace_part_finished_excess GUARDED_BY(slot_mtx);
+      SANITIZER_GUARDED_BY(slot_mtx);
+  uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
+  uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
+  uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
 };
 
 extern Context *ctx;  // The one and the only global runtime context.
@@ -566,10 +566,10 @@ uptr ALWAYS_INLINE HeapEnd() {
 }
 #endif
 
-void SlotAttachAndLock(ThreadState *thr) ACQUIRE(thr->slot->mtx);
+void SlotAttachAndLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
 void SlotDetach(ThreadState *thr);
-void SlotLock(ThreadState *thr) ACQUIRE(thr->slot->mtx);
-void SlotUnlock(ThreadState *thr) RELEASE(thr->slot->mtx);
+void SlotLock(ThreadState *thr) SANITIZER_ACQUIRE(thr->slot->mtx);
+void SlotUnlock(ThreadState *thr) SANITIZER_RELEASE(thr->slot->mtx);
 void DoReset(ThreadState *thr, uptr epoch);
 void FlushShadowMemory();
 

diff  --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
index 940c20fcfa1ab..e77bfba277a57 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_access.cpp
@@ -156,7 +156,7 @@ ALWAYS_INLINE void StoreShadow(RawShadow* sp, RawShadow s) {
 
 NOINLINE void DoReportRace(ThreadState* thr, RawShadow* shadow_mem, Shadow cur,
                            Shadow old,
-                           AccessType typ) NO_THREAD_SAFETY_ANALYSIS {
+                           AccessType typ) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
   // For the free shadow markers the first element (that contains kFreeSid)
   // triggers the race, but the second element contains info about the freeing
   // thread, take it.


        


More information about the llvm-commits mailing list