[compiler-rt] 0224399 - sanitizer_common: add deadlock detection to the Mutex2

Dmitry Vyukov via llvm-commits llvm-commits at lists.llvm.org
Fri Jul 23 00:11:59 PDT 2021


Author: Dmitry Vyukov
Date: 2021-07-23T09:11:52+02:00
New Revision: 022439931f5be77efaf80b44d587666b0c9b13b5

URL: https://github.com/llvm/llvm-project/commit/022439931f5be77efaf80b44d587666b0c9b13b5
DIFF: https://github.com/llvm/llvm-project/commit/022439931f5be77efaf80b44d587666b0c9b13b5.diff

LOG: sanitizer_common: add deadlock detection to the Mutex2

    Copy the internal deadlock detector from tsan to sanitizer_common
    (with some cosmetic changes).
    The tsan version will be deleted in subsequent changes.
    This allows us to switch tsan to the sanitizer_common mutex
    and remove tsan's mutex.

Reviewed By: vitalybuka, melver

Differential Revision: https://reviews.llvm.org/D106546

Added: 
    

Modified: 
    compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp
    compiler-rt/lib/sanitizer_common/sanitizer_mutex.h

Removed: 
    


################################################################################
diff  --git a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp
index e2c17b239835d..46f1d0279ca1d 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.cpp
@@ -48,4 +48,178 @@ void Semaphore::Post(u32 count) {
   FutexWake(&state_, count);
 }
 
+#if SANITIZER_CHECK_DEADLOCKS
+// An empty mutex meta table; it effectively disables deadlock detection.
+// Each tool can override the table to define its own mutex hierarchy and
+// enable deadlock detection.
+// The table defines a static mutex type hierarchy (what mutex types can be locked
+// under what mutex types). This table is checked to be acyclic and then
+// actual mutex lock/unlock operations are checked to adhere to this hierarchy.
+// The checking happens on mutex types rather than on individual mutex instances
+// because doing it on mutex instances would both significantly complicate
+// the implementation, worsen performance and memory overhead and is mostly
+// unnecessary (we almost never lock multiple mutexes of the same type recursively).
+static constexpr int kMutexTypeMax = 20;
+// Weak so that each tool can provide its own table (see the comment above).
+SANITIZER_WEAK_ATTRIBUTE MutexMeta mutex_meta[kMutexTypeMax] = {};
+// Weak hook used to print the lock-acquisition PC when a violation is found.
+SANITIZER_WEAK_ATTRIBUTE void PrintMutexPC(uptr pc) {}
+// Protects the lazy initialization below (see DebugMutexInit).
+static StaticSpinMutex mutex_meta_mtx;
+// Number of types registered in mutex_meta; -1 until DebugMutexInit has run.
+static int mutex_type_count = -1;
+// Adjacency matrix of what mutexes can be locked under what mutexes.
+static bool mutex_can_lock[kMutexTypeMax][kMutexTypeMax];
+// Mutex types with MutexMulti mark.
+static bool mutex_multi[kMutexTypeMax];
+
+// Builds the mutex_can_lock adjacency matrix from the (weak) mutex_meta table,
+// expands leaf mutexes, and dies if the resulting lock-order graph has a cycle.
+// Called lazily under mutex_meta_mtx from InternalDeadlockDetector::Initialize.
+void DebugMutexInit() {
+  // Build adjacency matrix.
+  bool leaf[kMutexTypeMax];
+  internal_memset(&leaf, 0, sizeof(leaf));
+  int cnt[kMutexTypeMax] = {};
+  internal_memset(&cnt, 0, sizeof(cnt));
+  for (int t = 0; t < kMutexTypeMax; t++) {
+    mutex_type_count = t;
+    // The table is terminated by the first entry without a name.
+    if (!mutex_meta[t].name)
+      break;
+    // Entries must be listed in the order of their type values.
+    CHECK_EQ(t, mutex_meta[t].type);
+    for (uptr j = 0; j < ARRAY_SIZE(mutex_meta[t].can_lock); j++) {
+      MutexType z = mutex_meta[t].can_lock[j];
+      // MutexInvalid terminates the per-type can_lock list.
+      if (z == MutexInvalid)
+        break;
+      if (z == MutexLeaf) {
+        CHECK(!leaf[t]);
+        leaf[t] = true;
+        continue;
+      }
+      if (z == MutexMulti) {
+        mutex_multi[t] = true;
+        continue;
+      }
+      CHECK_LT(z, kMutexTypeMax);
+      CHECK(!mutex_can_lock[t][z]);
+      mutex_can_lock[t][z] = true;
+      cnt[t]++;
+    }
+  }
+  // Indicates the array is not properly terminated.
+  CHECK_LT(mutex_type_count, kMutexTypeMax);
+  // Add leaf mutexes: a leaf can be locked under any other non-leaf mutex,
+  // and must not declare explicit successors of its own (cnt must be 0).
+  for (int t = 0; t < mutex_type_count; t++) {
+    if (!leaf[t])
+      continue;
+    CHECK_EQ(cnt[t], 0);
+    for (int z = 0; z < mutex_type_count; z++) {
+      if (z == MutexInvalid || t == z || leaf[z])
+        continue;
+      CHECK(!mutex_can_lock[z][t]);
+      mutex_can_lock[z][t] = true;
+    }
+  }
+  // Build the transitive closure and check that the graph is acyclic.
+  // Each row of the graph is kept as a bitmask, so the closure is a
+  // Warshall-style triple loop with bit-parallel row updates.
+  u32 trans[kMutexTypeMax];
+  static_assert(sizeof(trans[0]) * 8 >= kMutexTypeMax,
+                "kMutexTypeMax does not fit into u32, switch to u64");
+  internal_memset(&trans, 0, sizeof(trans));
+  for (int i = 0; i < mutex_type_count; i++) {
+    for (int j = 0; j < mutex_type_count; j++)
+      if (mutex_can_lock[i][j])
+        trans[i] |= 1 << j;
+  }
+  for (int k = 0; k < mutex_type_count; k++) {
+    for (int i = 0; i < mutex_type_count; i++) {
+      if (trans[i] & (1 << k))
+        trans[i] |= trans[k];
+    }
+  }
+  // A type reachable from itself means a cycle in the lock-order graph.
+  for (int i = 0; i < mutex_type_count; i++) {
+    if (trans[i] & (1 << i)) {
+      Printf("Mutex %s participates in a cycle\n", mutex_meta[i].name);
+      Die();
+    }
+  }
+}
+
+// Per-thread lock-order checker (one instance per thread, see the THREADLOCAL
+// declaration below). Tracks which mutex types the current thread holds and
+// verifies every new acquisition against the mutex_can_lock matrix.
+struct InternalDeadlockDetector {
+  struct LockDesc {
+    u64 seq;        // acquisition-order stamp; 0 means the type is not held
+    uptr pc;        // PC of the Lock call, kept for error reporting
+    int recursion;  // recursion depth for MutexMulti types
+  };
+  // 0 - not yet initialized, 1 - checking enabled, -1 - checking disabled.
+  int initialized;
+  u64 sequence;  // monotonically increasing acquisition counter
+  LockDesc locked[kMutexTypeMax];
+
+  void Lock(MutexType type, uptr pc) {
+    if (!Initialize(type))
+      return;
+    CHECK_LT(type, mutex_type_count);
+    // Find the last locked mutex type.
+    // This is the type we will use for hierarchy checks.
+    u64 max_seq = 0;
+    MutexType max_idx = MutexInvalid;
+    for (int i = 0; i != mutex_type_count; i++) {
+      if (locked[i].seq == 0)
+        continue;
+      // Sequence numbers are unique, so ties are impossible.
+      CHECK_NE(locked[i].seq, max_seq);
+      if (max_seq < locked[i].seq) {
+        max_seq = locked[i].seq;
+        max_idx = (MutexType)i;
+      }
+    }
+    if (max_idx == type && mutex_multi[type]) {
+      // Recursive lock of the same type.
+      CHECK_EQ(locked[type].seq, max_seq);
+      CHECK(locked[type].pc);
+      locked[type].recursion++;
+      return;
+    }
+    // max_idx == MutexInvalid means no checked mutex is currently held.
+    if (max_idx != MutexInvalid && !mutex_can_lock[max_idx][type]) {
+      Printf("%s: internal deadlock: can't lock %s under %s mutex\n", SanitizerToolName,
+             mutex_meta[type].name, mutex_meta[max_idx].name);
+      PrintMutexPC(pc);
+      CHECK(0);
+    }
+    locked[type].seq = ++sequence;
+    locked[type].pc = pc;
+    locked[type].recursion = 1;
+  }
+
+  void Unlock(MutexType type) {
+    if (!Initialize(type))
+      return;
+    CHECK_LT(type, mutex_type_count);
+    CHECK(locked[type].seq);
+    CHECK_GT(locked[type].recursion, 0);
+    // Keep the descriptor alive while there are recursive holders.
+    if (--locked[type].recursion)
+      return;
+    locked[type].seq = 0;
+    locked[type].pc = 0;
+  }
+
+  // Checks that the current thread does not hold any checked mutexes.
+  void CheckNoLocks() {
+    for (int i = 0; i < mutex_type_count; i++) CHECK_EQ(locked[i].recursion, 0);
+  }
+
+  // Returns true iff checking is enabled for this mutex type.
+  // Lazily runs DebugMutexInit on the first use of any checked type.
+  bool Initialize(MutexType type) {
+    if (type == MutexUnchecked || type == MutexInvalid)
+      return false;
+    CHECK_GT(type, MutexInvalid);
+    if (initialized != 0)
+      return initialized > 0;
+    // Mark as disabled before taking the lock -- presumably to avoid
+    // re-entering this detector while initializing; TODO confirm.
+    initialized = -1;
+    SpinMutexLock lock(&mutex_meta_mtx);
+    if (mutex_type_count < 0)
+      DebugMutexInit();
+    // Enable checking only if the tool registered at least one mutex type.
+    initialized = mutex_type_count ? 1 : -1;
+    return initialized > 0;
+  }
+};
+
+// Lock ordering is a per-thread property, so the detector state is THREADLOCAL
+// (which is also why SANITIZER_GO, whose linker lacks THREADLOCAL support,
+// is excluded from SANITIZER_CHECK_DEADLOCKS).
+static THREADLOCAL InternalDeadlockDetector deadlock_detector;
+
+void CheckedMutex::LockImpl(uptr pc) { deadlock_detector.Lock(type_, pc); }
+
+void CheckedMutex::UnlockImpl() { deadlock_detector.Unlock(type_); }
+
+void CheckedMutex::CheckNoLocksImpl() { deadlock_detector.CheckNoLocks(); }
+#endif
+
 }  // namespace __sanitizer

diff  --git a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
index c01e3adbf2fb5..c15b1ca08de1e 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mutex.h
@@ -74,12 +74,87 @@ class Semaphore {
   atomic_uint32_t state_ = {0};
 };
 
+// Mutex type used for deadlock detection; values come from the enum below
+// plus tool-specific types starting at MutexLastCommon.
+typedef int MutexType;
+
+enum {
+  // Used as sentinel and to catch unassigned types
+  // (should not be used as real Mutex type).
+  MutexInvalid = 0,
+  MutexThreadRegistry,
+  // Each tool's own mutexes must start at this number.
+  MutexLastCommon,
+  // Type for legacy mutexes that are not checked for deadlocks.
+  MutexUnchecked = -1,
+  // Special marks that can be used in MutexMeta::can_lock table.
+  // Note: MutexLeaf shares the value -1 with MutexUnchecked; the two are
+  // used in disjoint contexts (mutex type vs can_lock mark).
+  // The leaf mutexes can be locked under any other non-leaf mutex,
+  // but no other mutex can be locked while under a leaf mutex.
+  MutexLeaf = -1,
+  // Multiple mutexes of this type can be locked at the same time.
+  MutexMulti = -3,
+};
+
+// Go linker does not support THREADLOCAL variables,
+// so we can't use per-thread state.
+#define SANITIZER_CHECK_DEADLOCKS (SANITIZER_DEBUG && !SANITIZER_GO)
+
+#if SANITIZER_CHECK_DEADLOCKS
+// One entry of the weak mutex_meta table defined in sanitizer_mutex.cpp.
+struct MutexMeta {
+  MutexType type;
+  const char *name;
+  // The table fixes what mutexes can be locked under what mutexes.
+  // If the entry for MutexTypeFoo contains MutexTypeBar,
+  // then Bar mutex can be locked while under Foo mutex.
+  // Can also contain the special MutexLeaf/MutexMulti marks.
+  // The list is terminated by the first MutexInvalid (zero) element.
+  MutexType can_lock[10];
+};
+#endif
+
+// Checks mutex lock ordering to detect potential deadlocks in debug builds.
+// When SANITIZER_CHECK_DEADLOCKS is off, every method compiles to nothing
+// and type_ is not even stored.
+class CheckedMutex {
+ public:
+  constexpr CheckedMutex(MutexType type)
+#if SANITIZER_CHECK_DEADLOCKS
+      : type_(type)
+#endif
+  {
+  }
+
+  ALWAYS_INLINE void Lock() {
+#if SANITIZER_CHECK_DEADLOCKS
+    // The caller PC is recorded for error reporting (see PrintMutexPC).
+    LockImpl(GET_CALLER_PC());
+#endif
+  }
+
+  ALWAYS_INLINE void Unlock() {
+#if SANITIZER_CHECK_DEADLOCKS
+    UnlockImpl();
+#endif
+  }
+
+  // Checks that the current thread does not hold any mutexes
+  // (e.g. when returning from a runtime function to user code).
+  static void CheckNoLocks() {
+#if SANITIZER_CHECK_DEADLOCKS
+    CheckNoLocksImpl();
+#endif
+  }
+
+ private:
+#if SANITIZER_CHECK_DEADLOCKS
+  const MutexType type_;
+
+  // Out-of-line implementations live in sanitizer_mutex.cpp and forward
+  // to the per-thread InternalDeadlockDetector.
+  void LockImpl(uptr pc);
+  void UnlockImpl();
+  static void CheckNoLocksImpl();
+#endif
+};
+
 // Reader-writer mutex.
 class MUTEX Mutex2 {
  public:
-  constexpr Mutex2() {}
+  constexpr Mutex2(MutexType type = MutexUnchecked) : checked_(type) {}
 
   void Lock() ACQUIRE() {
+    checked_.Lock();
     u64 reset_mask = ~0ull;
     u64 state = atomic_load_relaxed(&state_);
     const uptr kMaxSpinIters = 1500;
@@ -125,6 +200,7 @@ class MUTEX Mutex2 {
   }
 
   void Unlock() RELEASE() {
+    checked_.Unlock();
     bool wake_writer;
     u64 wake_readers;
     u64 new_state;
@@ -153,6 +229,7 @@ class MUTEX Mutex2 {
   }
 
   void ReadLock() ACQUIRE_SHARED() {
+    checked_.Lock();
     bool locked;
     u64 new_state;
     u64 state = atomic_load_relaxed(&state_);
@@ -173,6 +250,7 @@ class MUTEX Mutex2 {
   }
 
   void ReadUnlock() RELEASE_SHARED() {
+    checked_.Unlock();
     bool wake;
     u64 new_state;
     u64 state = atomic_load_relaxed(&state_);
@@ -207,6 +285,7 @@ class MUTEX Mutex2 {
   }
 
  private:
+  [[no_unique_address]] CheckedMutex checked_;
   atomic_uint64_t state_ = {0};
   Semaphore writers_;
   Semaphore readers_;


        


More information about the llvm-commits mailing list