[compiler-rt] r202826 - tsan: add concurrent hashmap for standalone deadlock detector

Alexey Samsonov samsonov at google.com
Tue Mar 4 03:53:26 PST 2014


1) Please add some tests for AddrHashMap and RWMutex to sanitizer_common/tests.
2) Add sanitizer_addrhashmap.h to lib/sanitizer_common/CMakeLists.txt.
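
For (1), here is a rough sketch of what such tests might look like, in the
gtest style of the existing sanitizer_common unit tests. The file name, test
names, the TestValue struct and the addresses/sizes below are just
illustrative suggestions, not anything taken from this commit:

// sanitizer_addrhashmap_test.cc (hypothetical new file under
// lib/sanitizer_common/tests)
#include "sanitizer_common/sanitizer_addrhashmap.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "gtest/gtest.h"

namespace __sanitizer {

// Small POD payload, as required by AddrHashMap.
struct TestValue {
  int magic;
};

typedef AddrHashMap<TestValue, 11> TestMap;

TEST(SanitizerCommon, AddrHashMapBasic) {
  TestMap m;
  {
    // First access to an address creates the element.
    TestMap::Handle h(&m, 0x1000);
    EXPECT_TRUE(h.created());
    h->magic = 42;
  }
  {
    // Second access finds the existing element (read access).
    TestMap::Handle h(&m, 0x1000);
    EXPECT_FALSE(h.created());
    EXPECT_EQ(42, h->magic);
  }
  {
    // A removing handle deletes the element in its destructor.
    TestMap::Handle h(&m, 0x1000, /*remove*/ true);
    EXPECT_TRUE(h.exists());
  }
  {
    // Removing an address that was never inserted.
    TestMap::Handle h(&m, 0x2000, /*remove*/ true);
    EXPECT_FALSE(h.exists());
  }
}

TEST(SanitizerCommon, RWMutexBasic) {
  // Single-threaded smoke test; a multi-threaded stress test along the
  // lines of the existing SpinMutex test would be good to have as well.
  RWMutex mtx;
  mtx.Lock();
  mtx.CheckLocked();
  mtx.Unlock();
  mtx.ReadLock();
  mtx.ReadLock();  // multiple readers may hold the lock at the same time
  mtx.CheckLocked();
  mtx.ReadUnlock();
  mtx.ReadUnlock();
}

}  // namespace __sanitizer

The RWMutex test could of course also just go into the existing
sanitizer_mutex_test.cc instead.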


On Tue, Mar 4, 2014 at 3:39 PM, Dmitry Vyukov <dvyukov at google.com> wrote:

> Author: dvyukov
> Date: Tue Mar  4 05:39:56 2014
> New Revision: 202826
>
> URL: http://llvm.org/viewvc/llvm-project?rev=202826&view=rev
> Log:
> tsan: add concurrent hashmap for standalone deadlock detector
>
>
> Added:
>     compiler-rt/trunk/lib/sanitizer_common/sanitizer_addrhashmap.h
>     compiler-rt/trunk/lib/tsan/dd/
>       - copied from r202792, compiler-rt/trunk/lib/tsan/dd/
> Modified:
>     compiler-rt/trunk/lib/sanitizer_common/sanitizer_mutex.h
>     compiler-rt/trunk/lib/tsan/dd/dd_rtl.cc
>     compiler-rt/trunk/lib/tsan/dd/dd_rtl.h
>
> Added: compiler-rt/trunk/lib/sanitizer_common/sanitizer_addrhashmap.h
> URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_addrhashmap.h?rev=202826&view=auto
>
> ==============================================================================
> --- compiler-rt/trunk/lib/sanitizer_common/sanitizer_addrhashmap.h (added)
> +++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_addrhashmap.h Tue Mar  4 05:39:56 2014
> @@ -0,0 +1,207 @@
> +//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
> +//
> +//                     The LLVM Compiler Infrastructure
> +//
> +// This file is distributed under the University of Illinois Open Source
> +// License. See LICENSE.TXT for details.
> +//
>
> +//===----------------------------------------------------------------------===//
> +//
> +// Concurrent uptr->T hashmap.
> +//
>
> +//===----------------------------------------------------------------------===//
> +
> +#ifndef SANITIZER_ADDRHASHMAP_H
> +#define SANITIZER_ADDRHASHMAP_H
> +
> +#include "sanitizer_common.h"
> +#include "sanitizer_mutex.h"
> +#include "sanitizer_atomic.h"
> +
> +namespace __sanitizer {
> +
> +// Concurrent uptr->T hashmap.
> +// T must be a POD type, kSize is preferably a prime but can be any number.
> +// The hashmap is fixed size, it crashes on overflow.
> +// Usage example:
> +//
> +// typedef AddrHashMap<uptr, 11> Map;
> +// Map m;
> +// {
> +//   Map::Handle h(&m, addr);
> +//   use h.operator->() to access the data
> +//   if h.created() then the element was just created, and the current thread
> +//     has exclusive access to it
> +//   otherwise the current thread has only read access to the data
> +// }
> +// {
> +//   Map::Handle h(&m, addr, true);
> +//   this will remove the data from the map in Handle dtor
> +//   the current thread has exclusive access to the data
> +//   if !h.exists() then the element never existed
> +// }
> +template<typename T, uptr kSize>
> +class AddrHashMap {
> + private:
> +  struct Cell {
> +    RWMutex          mtx;
> +    atomic_uintptr_t addr;
> +    T                val;
> +  };
> +
> + public:
> +  AddrHashMap();
> +
> +  class Handle {
> +   public:
> +    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove = false);
> +    ~Handle();
> +    T *operator -> ();
> +    bool created() const;
> +    bool exists() const;
> +
> +   private:
> +    AddrHashMap<T, kSize> *map_;
> +    Cell                  *cell_;
> +    uptr                   addr_;
> +    bool                   created_;
> +    bool                   remove_;
> +    bool                   write_;
> +  };
> +
> + private:
> +  friend class Handle;
> +  Cell *table_;
> +
> +  static const uptr kRemoved = 1;
> +
> +  Cell *acquire(uptr addr, bool remove, bool *created, bool *write);
> +  void  release(uptr addr, bool remove, bool write, Cell *c);
> +  uptr hash(uptr addr);
> +};
> +
> +template<typename T, uptr kSize>
> +AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
> +    bool remove) {
> +  map_ = map;
> +  addr_ = addr;
> +  remove_ = remove;
> +  cell_ = map_->acquire(addr_, remove_, &created_, &write_);
> +}
> +
> +template<typename T, uptr kSize>
> +AddrHashMap<T, kSize>::Handle::~Handle() {
> +  map_->release(addr_, remove_, write_, cell_);
> +}
> +
> +template<typename T, uptr kSize>
> +T *AddrHashMap<T, kSize>::Handle::operator -> () {
> +  return &cell_->val;
> +}
> +
> +template<typename T, uptr kSize>
> +bool AddrHashMap<T, kSize>::Handle::created() const {
> +  return created_;
> +}
> +
> +template<typename T, uptr kSize>
> +bool AddrHashMap<T, kSize>::Handle::exists() const {
> +  return cell_ != 0;
> +}
> +
> +template<typename T, uptr kSize>
> +AddrHashMap<T, kSize>::AddrHashMap() {
> +  table_ = (Cell*)MmapOrDie(kSize * sizeof(Cell), "AddrHashMap");
> +}
> +
> +template<typename T, uptr kSize>
> +typename AddrHashMap<T, kSize>::Cell *AddrHashMap<T, kSize>::acquire(uptr addr,
> +    bool remove, bool *created, bool *write) {
> +  // When we access the element associated with addr,
> +  // we lock its home cell (the cell associated with hash(addr)).
> +  // If the element was just created or is going to be removed,
> +  // we lock the cell in write mode. Otherwise we lock in read mode.
> +  // The locking protects the object lifetime (it's not removed while
> +  // somebody else accesses it). It also helps to resolve concurrent
> +  // inserts.
> +  // Note that the home cell is not necessarily the cell where the element is
> +  // stored.
> +  *write = false;
> +  *created = false;
> +  uptr h0 = hash(addr);
> +  Cell *c0 = &table_[h0];
> +  // First try to find an existing element under read lock.
> +  if (!remove) {
> +    c0->mtx.ReadLock();
> +    uptr h = h0;
> +    for (;;) {
> +      Cell *c = &table_[h];
> +      uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
> +      if (addr1 == 0)  // empty cell denotes end of the cell chain for the elem
> +        break;
> +      if (addr1 == addr)  // ok, found it
> +        return c;
> +      h++;
> +      if (h == kSize)
> +        h = 0;
> +      CHECK_NE(h, h0);  // made the full cycle
> +    }
> +    c0->mtx.ReadUnlock();
> +  }
> +  // Now try to create it under write lock.
> +  *write = true;
> +  c0->mtx.Lock();
> +  uptr h = h0;
> +  for (;;) {
> +    Cell *c = &table_[h];
> +    uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
> +    if (addr1 == addr)  // another thread has inserted it ahead of us
> +      return c;
> +    if (remove && addr1 == 0) {  // the element has not existed before
> +      c0->mtx.Unlock();
> +      return 0;
> +    }
> +    if (!remove && (addr1 == 0 || addr1 == kRemoved) &&
> +        atomic_compare_exchange_strong(&c->addr, &addr1, addr,
> +          memory_order_acq_rel)) {
> +      // we've created the element
> +      *created = true;
> +      return c;
> +    }
> +    h++;
> +    if (h == kSize)
> +      h = 0;
> +    CHECK_NE(h, h0);  // made the full cycle
> +  }
> +}
> +
> +template<typename T, uptr kSize>
> +void AddrHashMap<T, kSize>::release(uptr addr, bool remove, bool write,
> +    Cell *c) {
> +  if (c == 0)
> +    return;
> +  // if we are going to remove, we must hold write lock
> +  CHECK_EQ(!remove || write, true);
> +  CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), addr);
> +  // denote that the cell is empty now
> +  if (remove)
> +    atomic_store(&c->addr, kRemoved, memory_order_release);
> +  // unlock the home cell
> +  uptr h0 = hash(addr);
> +  Cell *c0 = &table_[h0];
> +  if (write)
> +    c0->mtx.Unlock();
> +  else
> +    c0->mtx.ReadUnlock();
> +}
> +
> +template<typename T, uptr kSize>
> +uptr AddrHashMap<T, kSize>::hash(uptr addr) {
> +  addr += addr << 10;
> +  addr ^= addr >> 6;
> +  return addr % kSize;
> +}
> +
> +} // namespace __sanitizer
> +
> +#endif // SANITIZER_ADDRHASHMAP_H
>
> Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_mutex.h
> URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_mutex.h?rev=202826&r1=202825&r2=202826&view=diff
>
> ==============================================================================
> --- compiler-rt/trunk/lib/sanitizer_common/sanitizer_mutex.h (original)
> +++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_mutex.h Tue Mar  4 05:39:56 2014
> @@ -83,6 +83,88 @@ class BlockingMutex {
>    uptr owner_;  // for debugging
>  };
>
> +// Reader-writer spin mutex.
> +class RWMutex {
> + public:
> +  RWMutex() {
> +    atomic_store(&state_, kUnlocked, memory_order_relaxed);
> +  }
> +
> +  ~RWMutex() {
> +    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
> +  }
> +
> +  void Lock() {
> +    uptr cmp = kUnlocked;
> +    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
> +                                       memory_order_acquire))
> +      return;
> +    LockSlow();
> +  }
> +
> +  void Unlock() {
> +    uptr prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
> +    DCHECK_NE(prev & kWriteLock, 0);
> +    (void)prev;
> +  }
> +
> +  void ReadLock() {
> +    uptr prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
> +    if ((prev & kWriteLock) == 0)
> +      return;
> +    ReadLockSlow();
> +  }
> +
> +  void ReadUnlock() {
> +    uptr prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
> +    DCHECK_EQ(prev & kWriteLock, 0);
> +    DCHECK_GT(prev & ~kWriteLock, 0);
> +    (void)prev;
> +  }
> +
> +  void CheckLocked() {
> +    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
> +  }
> +
> + private:
> +  atomic_uintptr_t state_;
> +
> +  enum {
> +    kUnlocked = 0,
> +    kWriteLock = 1,
> +    kReadLock = 2
> +  };
> +
> +  void NOINLINE LockSlow() {
> +    for (int i = 0;; i++) {
> +      if (i < 10)
> +        proc_yield(10);
> +      else
> +        internal_sched_yield();
> +      uptr cmp = atomic_load(&state_, memory_order_relaxed);
> +      if (cmp == kUnlocked &&
> +          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
> +                                       memory_order_acquire))
> +          return;
> +    }
> +  }
> +
> +  void NOINLINE ReadLockSlow() {
> +    for (int i = 0;; i++) {
> +      if (i < 10)
> +        proc_yield(10);
> +      else
> +        internal_sched_yield();
> +      uptr prev = atomic_load(&state_, memory_order_acquire);
> +      if ((prev & kWriteLock) == 0)
> +        return;
> +    }
> +  }
> +
> +  RWMutex(const RWMutex&);
> +  void operator = (const RWMutex&);
> +};
> +
>  template<typename MutexType>
>  class GenericScopedLock {
>   public:
> @@ -123,6 +205,8 @@ class GenericScopedReadLock {
>
>  typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
>  typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
> +typedef GenericScopedLock<RWMutex> RWMutexLock;
> +typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
>
>  }  // namespace __sanitizer
>
>
> Modified: compiler-rt/trunk/lib/tsan/dd/dd_rtl.cc
> URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/dd/dd_rtl.cc?rev=202826&r1=202792&r2=202826&view=diff
>
> ==============================================================================
> --- compiler-rt/trunk/lib/tsan/dd/dd_rtl.cc (original)
> +++ compiler-rt/trunk/lib/tsan/dd/dd_rtl.cc Tue Mar  4 05:39:56 2014
> @@ -9,16 +9,19 @@
>
>  #include "dd_rtl.h"
>  #include "sanitizer_common/sanitizer_common.h"
> +#include "sanitizer_common/sanitizer_placement_new.h"
>  #include "sanitizer_common/sanitizer_flags.h"
>  #include "sanitizer_common/sanitizer_stacktrace.h"
>  #include "sanitizer_common/sanitizer_stackdepot.h"
>
>  namespace __dsan {
>
> -static Context ctx0;
> -static Context * const ctx = &ctx0;
> +static Context *ctx;
>
>  void Initialize() {
> +  static u64 ctx_mem[sizeof(Context) / sizeof(u64) + 1];
> +  ctx = new(ctx_mem) Context();
> +
>    InitializeInterceptors();
>    //common_flags()->allow_addr2line = true;
>    common_flags()->symbolize = true;
> @@ -37,9 +40,9 @@ void ThreadDestroy(Thread *thr) {
>
>  static u32 CurrentStackTrace(Thread *thr) {
>    StackTrace trace;
> -  thr->in_symbolizer = true;
> +  thr->ignore_interceptors = true;
>    trace.Unwind(1000, 0, 0, 0, 0, 0, false);
> -  thr->in_symbolizer = false;
> +  thr->ignore_interceptors = false;
>    const uptr skip = 4;
>    if (trace.size <= skip)
>      return 0;
> @@ -49,39 +52,9 @@ static u32 CurrentStackTrace(Thread *thr
>  static void PrintStackTrace(Thread *thr, u32 stk) {
>    uptr size = 0;
>    const uptr *trace = StackDepotGet(stk, &size);
> -  thr->in_symbolizer = true;
> +  thr->ignore_interceptors = true;
>    StackTrace::PrintStack(trace, size);
> -  thr->in_symbolizer = false;
> -}
> -
> -static Mutex *FindMutex(Thread *thr, uptr m) {
> -  SpinMutexLock l(&ctx->mutex_mtx);
> -  for (Mutex *mtx = ctx->mutex_list; mtx; mtx = mtx->link) {
> -    if (mtx->addr == m)
> -      return mtx;
> -  }
> -  Mutex *mtx = (Mutex*)InternalAlloc(sizeof(*mtx));
> -  internal_memset(mtx, 0, sizeof(*mtx));
> -  mtx->addr = m;
> -  ctx->dd->MutexInit(&mtx->dd, CurrentStackTrace(thr), ctx->mutex_seq++);
> -  mtx->link = ctx->mutex_list;
> -  ctx->mutex_list = mtx;
> -  return mtx;
> -}
> -
> -static Mutex *FindMutexAndRemove(uptr m) {
> -  SpinMutexLock l(&ctx->mutex_mtx);
> -  Mutex **prev = &ctx->mutex_list;
> -  for (;;) {
> -    Mutex *mtx = *prev;
> -    if (mtx == 0)
> -      return 0;
> -    if (mtx->addr == m) {
> -      *prev = mtx->link;
> -      return mtx;
> -    }
> -    prev = &mtx->link;
> -  }
> +  thr->ignore_interceptors = false;
>  }
>
>  static void ReportDeadlock(Thread *thr, DDReport *rep) {
> @@ -96,30 +69,34 @@ static void ReportDeadlock(Thread *thr,
>  }
>
>  void MutexLock(Thread *thr, uptr m, bool writelock, bool trylock) {
> -  if (thr->in_symbolizer)
> +  if (thr->ignore_interceptors)
>      return;
> -  Mutex *mtx = FindMutex(thr, m);
> -  DDReport *rep = ctx->dd->MutexLock(thr->dd_pt, thr->dd_lt, &mtx->dd,
> -      writelock, trylock);
> +  DDReport *rep = 0;
> +  {
> +    MutexHashMap::Handle h(&ctx->mutex_map, m);
> +    if (h.created())
> +      ctx->dd->MutexInit(&h->dd, CurrentStackTrace(thr), m);
> +    rep = ctx->dd->MutexLock(thr->dd_pt, thr->dd_lt, &h->dd,
> +                             writelock, trylock);
> +  }
>    if (rep)
>      ReportDeadlock(thr, rep);
>  }
>
>  void MutexUnlock(Thread *thr, uptr m, bool writelock) {
> -  if (thr->in_symbolizer)
> +  if (thr->ignore_interceptors)
>      return;
> -  Mutex *mtx = FindMutex(thr, m);
> -  ctx->dd->MutexUnlock(thr->dd_pt, thr->dd_lt, &mtx->dd, writelock);
> +  MutexHashMap::Handle h(&ctx->mutex_map, m);
> +  ctx->dd->MutexUnlock(thr->dd_pt, thr->dd_lt, &h->dd, writelock);
>  }
>
>  void MutexDestroy(Thread *thr, uptr m) {
> -  if (thr->in_symbolizer)
> +  if (thr->ignore_interceptors)
>      return;
> -  Mutex *mtx = FindMutexAndRemove(m);
> -  if (mtx == 0)
> +  MutexHashMap::Handle h(&ctx->mutex_map, m, true);
> +  if (!h.exists())
>      return;
> -  ctx->dd->MutexDestroy(thr->dd_pt, thr->dd_lt, &mtx->dd);
> -  InternalFree(mtx);
> +  ctx->dd->MutexDestroy(thr->dd_pt, thr->dd_lt, &h->dd);
>  }
>
>  }  // namespace __dsan
>
> Modified: compiler-rt/trunk/lib/tsan/dd/dd_rtl.h
> URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/dd/dd_rtl.h?rev=202826&r1=202792&r2=202826&view=diff
>
> ==============================================================================
> --- compiler-rt/trunk/lib/tsan/dd/dd_rtl.h (original)
> +++ compiler-rt/trunk/lib/tsan/dd/dd_rtl.h Tue Mar  4 05:39:56 2014
> @@ -12,13 +12,12 @@
>  #include "sanitizer_common/sanitizer_internal_defs.h"
>  #include "sanitizer_common/sanitizer_deadlock_detector_interface.h"
>  #include "sanitizer_common/sanitizer_allocator_internal.h"
> +#include "sanitizer_common/sanitizer_addrhashmap.h"
>  #include "sanitizer_common/sanitizer_mutex.h"
>
>  namespace __dsan {
>
>  struct Mutex {
> -  Mutex *link;
> -  uptr addr;
>    DDMutex dd;
>  };
>
> @@ -26,15 +25,15 @@ struct Thread {
>    DDPhysicalThread *dd_pt;
>    DDLogicalThread *dd_lt;
>
> -  bool in_symbolizer;
> +  bool ignore_interceptors;
>  };
>
> +typedef AddrHashMap<Mutex, 1000003> MutexHashMap;
> +
>  struct Context {
>    DDetector *dd;
>
> -  SpinMutex mutex_mtx;
> -  Mutex *mutex_list;
> -  u64 mutex_seq;
> +  MutexHashMap mutex_map;
>  };
>
>  void InitializeInterceptors();
>
>



-- 
Alexey Samsonov, MSK