[compiler-rt] r365790 - [scudo][standalone] Merge Spin & Blocking mutex into a Hybrid one

Kostya Kortchinsky via llvm-commits llvm-commits at lists.llvm.org
Thu Jul 11 08:32:27 PDT 2019


Author: cryptoad
Date: Thu Jul 11 08:32:26 2019
New Revision: 365790

URL: http://llvm.org/viewvc/llvm-project?rev=365790&view=rev
Log:
[scudo][standalone] Merge Spin & Blocking mutex into a Hybrid one

Summary:
We ran into a problem on Fuchsia where yielding threads would never
be deboosted, ultimately resulting in several threads spinning on the
same TSD, and no possibility for another thread to be scheduled,
deadlocking the process.

While this was fixed in Zircon, this led to discussions about whether
spinning without a break condition was a good decision, and we settled
on a new hybrid model that would spin for a while then block.

Currently we are using a number of iterations for spinning that is
mostly arbitrary (based on sanitizer_common values), but this can
be tuned in the future.

Since we are touching `common.h`, we also use this change as a vehicle
for an Android optimization (the page size is fixed in Bionic, so use
a fixed value too).

Reviewers: morehouse, hctim, eugenis, dvyukov, vitalybuka

Reviewed By: hctim

Subscribers: srhines, delcypher, jfb, #sanitizers, llvm-commits

Tags: #llvm, #sanitizers

Differential Revision: https://reviews.llvm.org/D64358

Modified:
    compiler-rt/trunk/lib/scudo/standalone/atomic_helpers.h
    compiler-rt/trunk/lib/scudo/standalone/bytemap.h
    compiler-rt/trunk/lib/scudo/standalone/common.h
    compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc
    compiler-rt/trunk/lib/scudo/standalone/linux.cc
    compiler-rt/trunk/lib/scudo/standalone/mutex.h
    compiler-rt/trunk/lib/scudo/standalone/primary32.h
    compiler-rt/trunk/lib/scudo/standalone/primary64.h
    compiler-rt/trunk/lib/scudo/standalone/quarantine.h
    compiler-rt/trunk/lib/scudo/standalone/secondary.cc
    compiler-rt/trunk/lib/scudo/standalone/secondary.h
    compiler-rt/trunk/lib/scudo/standalone/stats.h
    compiler-rt/trunk/lib/scudo/standalone/tests/map_test.cc
    compiler-rt/trunk/lib/scudo/standalone/tests/mutex_test.cc
    compiler-rt/trunk/lib/scudo/standalone/tsd.h
    compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h
    compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h

Modified: compiler-rt/trunk/lib/scudo/standalone/atomic_helpers.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/atomic_helpers.h?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/atomic_helpers.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/atomic_helpers.h Thu Jul 11 08:32:26 2019
@@ -126,6 +126,14 @@ INLINE void atomic_store_relaxed(volatil
   atomic_store(A, V, memory_order_relaxed);
 }
 
+template <typename T>
+INLINE typename T::Type atomic_compare_exchange(volatile T *A,
+                                                typename T::Type Cmp,
+                                                typename T::Type Xchg) {
+  atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+  return Cmp;
+}
+
 } // namespace scudo
 
 #endif // SCUDO_ATOMIC_H_

Modified: compiler-rt/trunk/lib/scudo/standalone/bytemap.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/bytemap.h?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/bytemap.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/bytemap.h Thu Jul 11 08:32:26 2019
@@ -45,8 +45,8 @@ public:
         map(nullptr, sizeof(atomic_uptr) * Level1Size, "scudo:bytemap"));
   }
   void init() {
-    initLinkerInitialized();
     Mutex.init();
+    initLinkerInitialized();
   }
 
   void reset() {
@@ -92,7 +92,7 @@ private:
   u8 *getOrCreate(uptr Index) {
     u8 *Res = get(Index);
     if (!Res) {
-      SpinMutexLock L(&Mutex);
+      ScopedLock L(Mutex);
       if (!(Res = get(Index))) {
         Res = reinterpret_cast<u8 *>(map(nullptr, Level2Size, "scudo:bytemap"));
         atomic_store(&Level1Map[Index], reinterpret_cast<uptr>(Res),
@@ -103,7 +103,7 @@ private:
   }
 
   atomic_uptr *Level1Map;
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
 };
 
 } // namespace scudo

Modified: compiler-rt/trunk/lib/scudo/standalone/common.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/common.h?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/common.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/common.h Thu Jul 11 08:32:26 2019
@@ -115,11 +115,12 @@ INLINE void yieldProcessor(u8 Count) {
 
 // Platform specific functions.
 
-void yieldPlatform();
-
 extern uptr PageSizeCached;
 uptr getPageSizeSlow();
 INLINE uptr getPageSizeCached() {
+  // Bionic uses a hardcoded value.
+  if (SCUDO_ANDROID)
+    return 4096U;
   if (LIKELY(PageSizeCached))
     return PageSizeCached;
   return getPageSizeSlow();

Modified: compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc (original)
+++ compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc Thu Jul 11 08:32:26 2019
@@ -23,11 +23,6 @@
 
 namespace scudo {
 
-void yieldPlatform() {
-  const zx_status_t Status = _zx_nanosleep(0);
-  CHECK_EQ(Status, ZX_OK);
-}
-
 uptr getPageSize() { return PAGE_SIZE; }
 
 void NORETURN die() { __builtin_trap(); }
@@ -155,18 +150,20 @@ const char *getEnv(const char *Name) { r
 // Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
 // because the Fuchsia implementation of sync_mutex_t has clang thread safety
 // annotations. Were we to apply proper capability annotations to the top level
-// BlockingMutex class itself, they would not be needed. As it stands, the
+// HybridMutex class itself, they would not be needed. As it stands, the
 // thread analysis thinks that we are locking the mutex and accidentally leaving
 // it locked on the way out.
-void BlockingMutex::lock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
   // Size and alignment must be compatible between both types.
-  COMPILER_CHECK(sizeof(sync_mutex_t) <= sizeof(OpaqueStorage));
-  COMPILER_CHECK(!(alignof(decltype(OpaqueStorage)) % alignof(sync_mutex_t)));
-  sync_mutex_lock(reinterpret_cast<sync_mutex_t *>(OpaqueStorage));
+  return sync_mutex_trylock(&M) == ZX_OK;
+}
+
+void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
+  sync_mutex_lock(&M);
 }
 
-void BlockingMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
-  sync_mutex_unlock(reinterpret_cast<sync_mutex_t *>(OpaqueStorage));
+void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+  sync_mutex_unlock(&M);
 }
 
 u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }

Modified: compiler-rt/trunk/lib/scudo/standalone/linux.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/linux.cc?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/linux.cc (original)
+++ compiler-rt/trunk/lib/scudo/standalone/linux.cc Thu Jul 11 08:32:26 2019
@@ -37,8 +37,6 @@
 
 namespace scudo {
 
-void yieldPlatform() { sched_yield(); }
-
 uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
 
 void NORETURN die() { abort(); }
@@ -46,15 +44,18 @@ void NORETURN die() { abort(); }
 void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
           UNUSED MapPlatformData *Data) {
   int MmapFlags = MAP_PRIVATE | MAP_ANON;
-  if (Flags & MAP_NOACCESS)
+  int MmapProt;
+  if (Flags & MAP_NOACCESS) {
     MmapFlags |= MAP_NORESERVE;
+    MmapProt = PROT_NONE;
+  } else {
+    MmapProt = PROT_READ | PROT_WRITE;
+  }
   if (Addr) {
     // Currently no scenario for a noaccess mapping with a fixed address.
     DCHECK_EQ(Flags & MAP_NOACCESS, 0);
     MmapFlags |= MAP_FIXED;
   }
-  const int MmapProt =
-      (Flags & MAP_NOACCESS) ? PROT_NONE : PROT_READ | PROT_WRITE;
   void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
   if (P == MAP_FAILED) {
     if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
@@ -84,22 +85,34 @@ void releasePagesToOS(uptr BaseAddress,
 // Calling getenv should be fine (c)(tm) at any time.
 const char *getEnv(const char *Name) { return getenv(Name); }
 
-void BlockingMutex::lock() {
-  atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
-  if (atomic_exchange(M, MtxLocked, memory_order_acquire) == MtxUnlocked)
+namespace {
+enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
+}
+
+bool HybridMutex::tryLock() {
+  return atomic_compare_exchange(&M, Unlocked, Locked) == Unlocked;
+}
+
+// The following is based on https://akkadia.org/drepper/futex.pdf.
+void HybridMutex::lockSlow() {
+  u32 V = atomic_compare_exchange(&M, Unlocked, Locked);
+  if (V == Unlocked)
     return;
-  while (atomic_exchange(M, MtxSleeping, memory_order_acquire) != MtxUnlocked)
-    syscall(SYS_futex, reinterpret_cast<uptr>(OpaqueStorage),
-            FUTEX_WAIT_PRIVATE, MtxSleeping, nullptr, nullptr, 0);
+  if (V != Sleeping)
+    V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+  while (V != Unlocked) {
+    syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAIT_PRIVATE, Sleeping,
+            nullptr, nullptr, 0);
+    V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+  }
 }
 
-void BlockingMutex::unlock() {
-  atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
-  const u32 V = atomic_exchange(M, MtxUnlocked, memory_order_release);
-  DCHECK_NE(V, MtxUnlocked);
-  if (V == MtxSleeping)
-    syscall(SYS_futex, reinterpret_cast<uptr>(OpaqueStorage),
-            FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
+void HybridMutex::unlock() {
+  if (atomic_fetch_sub(&M, 1U, memory_order_release) != Locked) {
+    atomic_store(&M, Unlocked, memory_order_release);
+    syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAKE_PRIVATE, 1,
+            nullptr, nullptr, 0);
+  }
 }
 
 u64 getMonotonicTime() {
@@ -141,8 +154,8 @@ bool getRandom(void *Buffer, uptr Length
 }
 
 void outputRaw(const char *Buffer) {
-  static StaticSpinMutex Mutex;
-  SpinMutexLock L(&Mutex);
+  static HybridMutex Mutex;
+  ScopedLock L(Mutex);
   write(2, Buffer, strlen(Buffer));
 }
 

Modified: compiler-rt/trunk/lib/scudo/standalone/mutex.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/mutex.h?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/mutex.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/mutex.h Thu Jul 11 08:32:26 2019
@@ -12,82 +12,62 @@
 #include "atomic_helpers.h"
 #include "common.h"
 
+#include <string.h>
+
+#if SCUDO_FUCHSIA
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#endif
+
 namespace scudo {
 
-class StaticSpinMutex {
+class HybridMutex {
 public:
-  void init() { atomic_store_relaxed(&State, 0); }
-
-  void lock() {
+  void init() { memset(this, 0, sizeof(*this)); }
+  bool tryLock();
+  NOINLINE void lock() {
     if (tryLock())
       return;
-    lockSlow();
-  }
-
-  bool tryLock() {
-    return atomic_exchange(&State, 1, memory_order_acquire) == 0;
-  }
-
-  void unlock() { atomic_store(&State, 0, memory_order_release); }
-
-  void checkLocked() { CHECK_EQ(atomic_load_relaxed(&State), 1); }
-
-private:
-  atomic_u8 State;
-
-  void NOINLINE lockSlow() {
-    for (u32 I = 0;; I++) {
-      if (I < 10)
-        yieldProcessor(10);
-      else
-        yieldPlatform();
-      if (atomic_load_relaxed(&State) == 0 &&
-          atomic_exchange(&State, 1, memory_order_acquire) == 0)
+      // The compiler may try to fully unroll the loop, ending up in a
+      // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
+      // is large, ugly and unneeded, a compact loop is better for our purpose
+      // here. Use a pragma to tell the compiler not to unroll the loop.
+#ifdef __clang__
+#pragma nounroll
+#endif
+    for (u8 I = 0U; I < NumberOfTries; I++) {
+      yieldProcessor(NumberOfYields);
+      if (tryLock())
         return;
     }
+    lockSlow();
   }
-};
-
-class SpinMutex : public StaticSpinMutex {
-public:
-  SpinMutex() { init(); }
+  void unlock();
 
 private:
-  SpinMutex(const SpinMutex &) = delete;
-  void operator=(const SpinMutex &) = delete;
-};
+  static constexpr u8 NumberOfTries = 10U;
+  static constexpr u8 NumberOfYields = 10U;
 
-class BlockingMutex {
-public:
-  explicit constexpr BlockingMutex(LinkerInitialized) : OpaqueStorage{} {}
-  BlockingMutex() { memset(this, 0, sizeof(*this)); }
-  void lock();
-  void unlock();
-  void checkLocked() {
-    atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
-    CHECK_NE(MtxUnlocked, atomic_load_relaxed(M));
-  }
+#if SCUDO_LINUX
+  atomic_u32 M;
+#elif SCUDO_FUCHSIA
+  sync_mutex_t M;
+#endif
 
-private:
-  enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-  uptr OpaqueStorage[1];
+  void lockSlow();
 };
 
-template <typename MutexType> class GenericScopedLock {
+class ScopedLock {
 public:
-  explicit GenericScopedLock(MutexType *M) : Mutex(M) { Mutex->lock(); }
-  ~GenericScopedLock() { Mutex->unlock(); }
+  explicit ScopedLock(HybridMutex &M) : Mutex(M) { Mutex.lock(); }
+  ~ScopedLock() { Mutex.unlock(); }
 
 private:
-  MutexType *Mutex;
+  HybridMutex &Mutex;
 
-  GenericScopedLock(const GenericScopedLock &) = delete;
-  void operator=(const GenericScopedLock &) = delete;
+  ScopedLock(const ScopedLock &) = delete;
+  void operator=(const ScopedLock &) = delete;
 };
 
-typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
-typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
-
 } // namespace scudo
 
 #endif // SCUDO_MUTEX_H_

Modified: compiler-rt/trunk/lib/scudo/standalone/primary32.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/primary32.h?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/primary32.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/primary32.h Thu Jul 11 08:32:26 2019
@@ -97,7 +97,7 @@ public:
   TransferBatch *popBatch(CacheT *C, uptr ClassId) {
     DCHECK_LT(ClassId, NumClasses);
     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
-    BlockingMutexLock L(&Sci->Mutex);
+    ScopedLock L(Sci->Mutex);
     TransferBatch *B = Sci->FreeList.front();
     if (B)
       Sci->FreeList.pop_front();
@@ -115,7 +115,7 @@ public:
     DCHECK_LT(ClassId, NumClasses);
     DCHECK_GT(B->getCount(), 0);
     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
-    BlockingMutexLock L(&Sci->Mutex);
+    ScopedLock L(Sci->Mutex);
     Sci->FreeList.push_front(B);
     Sci->Stats.PushedBlocks += B->getCount();
     if (Sci->CanRelease)
@@ -164,7 +164,7 @@ public:
   void releaseToOS() {
     for (uptr I = 1; I < NumClasses; I++) {
       SizeClassInfo *Sci = getSizeClassInfo(I);
-      BlockingMutexLock L(&Sci->Mutex);
+      ScopedLock L(Sci->Mutex);
       releaseToOSMaybe(Sci, I, /*Force=*/true);
     }
   }
@@ -192,7 +192,7 @@ private:
   };
 
   struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
-    BlockingMutex Mutex;
+    HybridMutex Mutex;
     IntrusiveList<TransferBatch> FreeList;
     SizeClassStats Stats;
     bool CanRelease;
@@ -217,7 +217,7 @@ private:
     const uptr MapEnd = MapBase + MapSize;
     uptr Region = MapBase;
     if (isAligned(Region, RegionSize)) {
-      SpinMutexLock L(&RegionsStashMutex);
+      ScopedLock L(RegionsStashMutex);
       if (NumberOfStashedRegions < MaxStashedRegions)
         RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
       else
@@ -237,7 +237,7 @@ private:
     DCHECK_LT(ClassId, NumClasses);
     uptr Region = 0;
     {
-      SpinMutexLock L(&RegionsStashMutex);
+      ScopedLock L(RegionsStashMutex);
       if (NumberOfStashedRegions > 0)
         Region = RegionsStash[--NumberOfStashedRegions];
     }
@@ -389,7 +389,7 @@ private:
   // Unless several threads request regions simultaneously from different size
   // classes, the stash rarely contains more than 1 entry.
   static constexpr uptr MaxStashedRegions = 4;
-  StaticSpinMutex RegionsStashMutex;
+  HybridMutex RegionsStashMutex;
   uptr NumberOfStashedRegions;
   uptr RegionsStash[MaxStashedRegions];
 };

Modified: compiler-rt/trunk/lib/scudo/standalone/primary64.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/primary64.h?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/primary64.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/primary64.h Thu Jul 11 08:32:26 2019
@@ -100,7 +100,7 @@ public:
   TransferBatch *popBatch(CacheT *C, uptr ClassId) {
     DCHECK_LT(ClassId, NumClasses);
     RegionInfo *Region = getRegionInfo(ClassId);
-    BlockingMutexLock L(&Region->Mutex);
+    ScopedLock L(Region->Mutex);
     TransferBatch *B = Region->FreeList.front();
     if (B)
       Region->FreeList.pop_front();
@@ -117,7 +117,7 @@ public:
   void pushBatch(uptr ClassId, TransferBatch *B) {
     DCHECK_GT(B->getCount(), 0);
     RegionInfo *Region = getRegionInfo(ClassId);
-    BlockingMutexLock L(&Region->Mutex);
+    ScopedLock L(Region->Mutex);
     Region->FreeList.push_front(B);
     Region->Stats.PushedBlocks += B->getCount();
     if (Region->CanRelease)
@@ -168,7 +168,7 @@ public:
   void releaseToOS() {
     for (uptr I = 1; I < NumClasses; I++) {
       RegionInfo *Region = getRegionInfo(I);
-      BlockingMutexLock L(&Region->Mutex);
+      ScopedLock L(Region->Mutex);
       releaseToOSMaybe(Region, I, /*Force=*/true);
     }
   }
@@ -194,7 +194,7 @@ private:
   };
 
   struct ALIGNED(SCUDO_CACHE_LINE_SIZE) RegionInfo {
-    BlockingMutex Mutex;
+    HybridMutex Mutex;
     IntrusiveList<TransferBatch> FreeList;
     RegionStats Stats;
     bool CanRelease;

Modified: compiler-rt/trunk/lib/scudo/standalone/quarantine.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/quarantine.h?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/quarantine.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/quarantine.h Thu Jul 11 08:32:26 2019
@@ -202,7 +202,7 @@ public:
 
   void NOINLINE drain(CacheT *C, Callback Cb) {
     {
-      SpinMutexLock L(&CacheMutex);
+      ScopedLock L(CacheMutex);
       Cache.transfer(C);
     }
     if (Cache.getSize() > getMaxSize() && RecyleMutex.tryLock())
@@ -211,7 +211,7 @@ public:
 
   void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
     {
-      SpinMutexLock L(&CacheMutex);
+      ScopedLock L(CacheMutex);
       Cache.transfer(C);
     }
     RecyleMutex.lock();
@@ -227,9 +227,9 @@ public:
 
 private:
   // Read-only data.
-  alignas(SCUDO_CACHE_LINE_SIZE) StaticSpinMutex CacheMutex;
+  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
   CacheT Cache;
-  alignas(SCUDO_CACHE_LINE_SIZE) StaticSpinMutex RecyleMutex;
+  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecyleMutex;
   atomic_uptr MinSize;
   atomic_uptr MaxSize;
   alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;
@@ -238,7 +238,7 @@ private:
     CacheT Tmp;
     Tmp.init();
     {
-      SpinMutexLock L(&CacheMutex);
+      ScopedLock L(CacheMutex);
       // Go over the batches and merge partially filled ones to
       // save some memory, otherwise batches themselves (since the memory used
       // by them is counted against quarantine limit) can overcome the actual

Modified: compiler-rt/trunk/lib/scudo/standalone/secondary.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/secondary.cc?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/secondary.cc (original)
+++ compiler-rt/trunk/lib/scudo/standalone/secondary.cc Thu Jul 11 08:32:26 2019
@@ -72,7 +72,7 @@ void *MapAllocator::allocate(uptr Size,
   H->BlockEnd = CommitBase + CommitSize;
   H->Data = Data;
   {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     if (!Tail) {
       Tail = H;
     } else {
@@ -95,7 +95,7 @@ void *MapAllocator::allocate(uptr Size,
 void MapAllocator::deallocate(void *Ptr) {
   LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
   {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     LargeBlock::Header *Prev = H->Prev;
     LargeBlock::Header *Next = H->Next;
     if (Prev) {

Modified: compiler-rt/trunk/lib/scudo/standalone/secondary.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/secondary.h?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/secondary.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/secondary.h Thu Jul 11 08:32:26 2019
@@ -82,7 +82,7 @@ public:
   }
 
 private:
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
   LargeBlock::Header *Tail;
   uptr AllocatedBytes;
   uptr FreedBytes;

Modified: compiler-rt/trunk/lib/scudo/standalone/stats.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/stats.h?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/stats.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/stats.h Thu Jul 11 08:32:26 2019
@@ -65,7 +65,7 @@ public:
   }
 
   void link(LocalStats *S) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     S->Next = Next;
     S->Prev = this;
     Next->Prev = S;
@@ -73,7 +73,7 @@ public:
   }
 
   void unlink(LocalStats *S) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     S->Prev->Next = S->Next;
     S->Next->Prev = S->Prev;
     for (uptr I = 0; I < StatCount; I++)
@@ -82,7 +82,7 @@ public:
 
   void get(uptr *S) const {
     memset(S, 0, StatCount * sizeof(uptr));
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     const LocalStats *Stats = this;
     for (;;) {
       for (uptr I = 0; I < StatCount; I++)
@@ -97,7 +97,7 @@ public:
   }
 
 private:
-  mutable StaticSpinMutex Mutex;
+  mutable HybridMutex Mutex;
 };
 
 } // namespace scudo

Modified: compiler-rt/trunk/lib/scudo/standalone/tests/map_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/tests/map_test.cc?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tests/map_test.cc (original)
+++ compiler-rt/trunk/lib/scudo/standalone/tests/map_test.cc Thu Jul 11 08:32:26 2019
@@ -11,9 +11,15 @@
 #include "gtest/gtest.h"
 
 #include <string.h>
+#include <unistd.h>
 
 static const char *MappingName = "scudo:test";
 
+TEST(ScudoMapTest, PageSize) {
+  EXPECT_EQ(scudo::getPageSizeCached(),
+            static_cast<scudo::uptr>(getpagesize()));
+}
+
 TEST(ScudoMapTest, MapNoAccessUnmap) {
   const scudo::uptr Size = 4 * scudo::getPageSizeCached();
   scudo::MapPlatformData Data = {};

Modified: compiler-rt/trunk/lib/scudo/standalone/tests/mutex_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/tests/mutex_test.cc?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tests/mutex_test.cc (original)
+++ compiler-rt/trunk/lib/scudo/standalone/tests/mutex_test.cc Thu Jul 11 08:32:26 2019
@@ -12,15 +12,15 @@
 
 #include <string.h>
 
-template <typename MutexType> class TestData {
+class TestData {
 public:
-  explicit TestData(MutexType *M) : Mutex(M) {
+  explicit TestData(scudo::HybridMutex &M) : Mutex(M) {
     for (scudo::u32 I = 0; I < Size; I++)
       Data[I] = 0;
   }
 
   void write() {
-    Lock L(Mutex);
+    scudo::ScopedLock L(Mutex);
     T V0 = Data[0];
     for (scudo::u32 I = 0; I < Size; I++) {
       EXPECT_EQ(Data[I], V0);
@@ -29,14 +29,14 @@ public:
   }
 
   void tryWrite() {
-    if (!Mutex->tryLock())
+    if (!Mutex.tryLock())
       return;
     T V0 = Data[0];
     for (scudo::u32 I = 0; I < Size; I++) {
       EXPECT_EQ(Data[I], V0);
       Data[I]++;
     }
-    Mutex->unlock();
+    Mutex.unlock();
   }
 
   void backoff() {
@@ -48,10 +48,9 @@ public:
   }
 
 private:
-  typedef scudo::GenericScopedLock<MutexType> Lock;
   static const scudo::u32 Size = 64U;
   typedef scudo::u64 T;
-  MutexType *Mutex;
+  scudo::HybridMutex &Mutex;
   ALIGNED(SCUDO_CACHE_LINE_SIZE) T Data[Size];
 };
 
@@ -62,8 +61,8 @@ const scudo::u32 NumberOfIterations = 4
 const scudo::u32 NumberOfIterations = 16 * 1024;
 #endif
 
-template <typename MutexType> static void *lockThread(void *Param) {
-  TestData<MutexType> *Data = reinterpret_cast<TestData<MutexType> *>(Param);
+static void *lockThread(void *Param) {
+  TestData *Data = reinterpret_cast<TestData *>(Param);
   for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
     Data->write();
     Data->backoff();
@@ -71,8 +70,8 @@ template <typename MutexType> static voi
   return 0;
 }
 
-template <typename MutexType> static void *tryThread(void *Param) {
-  TestData<MutexType> *Data = reinterpret_cast<TestData<MutexType> *>(Param);
+static void *tryThread(void *Param) {
+  TestData *Data = reinterpret_cast<TestData *>(Param);
   for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
     Data->tryWrite();
     Data->backoff();
@@ -80,42 +79,24 @@ template <typename MutexType> static voi
   return 0;
 }
 
-template <typename MutexType> static void checkLocked(MutexType *M) {
-  scudo::GenericScopedLock<MutexType> L(M);
-  M->checkLocked();
-}
-
-TEST(ScudoMutexTest, SpinMutex) {
-  scudo::SpinMutex M;
+TEST(ScudoMutexTest, Mutex) {
+  scudo::HybridMutex M;
   M.init();
-  TestData<scudo::SpinMutex> Data(&M);
+  TestData Data(M);
   pthread_t Threads[NumberOfThreads];
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_create(&Threads[I], 0, lockThread<scudo::SpinMutex>, &Data);
+    pthread_create(&Threads[I], 0, lockThread, &Data);
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
     pthread_join(Threads[I], 0);
 }
 
-TEST(ScudoMutexTest, SpinMutexTry) {
-  scudo::SpinMutex M;
+TEST(ScudoMutexTest, MutexTry) {
+  scudo::HybridMutex M;
   M.init();
-  TestData<scudo::SpinMutex> Data(&M);
-  pthread_t Threads[NumberOfThreads];
-  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_create(&Threads[I], 0, tryThread<scudo::SpinMutex>, &Data);
-  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_join(Threads[I], 0);
-}
-
-TEST(ScudoMutexTest, BlockingMutex) {
-  scudo::u64 MutexMemory[1024] = {};
-  scudo::BlockingMutex *M =
-      new (MutexMemory) scudo::BlockingMutex(scudo::LINKER_INITIALIZED);
-  TestData<scudo::BlockingMutex> Data(M);
+  TestData Data(M);
   pthread_t Threads[NumberOfThreads];
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_create(&Threads[I], 0, lockThread<scudo::BlockingMutex>, &Data);
+    pthread_create(&Threads[I], 0, tryThread, &Data);
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
     pthread_join(Threads[I], 0);
-  checkLocked(M);
 }

Modified: compiler-rt/trunk/lib/scudo/standalone/tsd.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/tsd.h?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tsd.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/tsd.h Thu Jul 11 08:32:26 2019
@@ -57,7 +57,7 @@ template <class Allocator> struct ALIGNE
   INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
 
 private:
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
   atomic_uptr Precedence;
 };
 

Modified: compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h Thu Jul 11 08:32:26 2019
@@ -60,7 +60,7 @@ template <class Allocator> struct TSDReg
 
 private:
   void initOnceMaybe(Allocator *Instance) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     if (Initialized)
       return;
     initLinkerInitialized(Instance); // Sets Initialized.
@@ -82,7 +82,7 @@ private:
   pthread_key_t PThreadKey;
   bool Initialized;
   TSD<Allocator> *FallbackTSD;
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
   static THREADLOCAL ThreadState State;
   static THREADLOCAL TSD<Allocator> ThreadTSD;
 

Modified: compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h?rev=365790&r1=365789&r2=365790&view=diff
==============================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h (original)
+++ compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h Thu Jul 11 08:32:26 2019
@@ -94,7 +94,7 @@ private:
   }
 
   void initOnceMaybe(Allocator *Instance) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     if (Initialized)
       return;
     initLinkerInitialized(Instance); // Sets Initialized.
@@ -152,7 +152,7 @@ private:
   u32 NumberOfCoPrimes;
   u32 CoPrimes[MaxTSDCount];
   bool Initialized;
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
 #if SCUDO_LINUX && !SCUDO_ANDROID
   static THREADLOCAL TSD<Allocator> *ThreadTSD;
 #endif




More information about the llvm-commits mailing list