tests please

On Tue, Mar 18, 2014 at 1:30 AM, Dmitry Vyukov <dvyukov@google.com> wrote:
Author: dvyukov
Date: Tue Mar 18 03:30:14 2014
New Revision: 204126

URL: http://llvm.org/viewvc/llvm-project?rev=204126&view=rev
Log:
tsan: better addr->object hashmap
still experimental
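
For context, a minimal usage sketch of the map this patch reworks, patterned on the usage example in the header comment below (ObjectInfo, OnAlloc and OnFree are illustrative names, not part of the patch):

  // T must be a POD type; a Handle finds-or-inserts the entry and locks it.
  struct ObjectInfo {
    uptr size;
  };
  typedef AddrHashMap<ObjectInfo, 10007> Map;
  static Map m;

  void OnAlloc(uptr addr, uptr size) {
    Map::Handle h(&m, addr);        // find or insert the entry for addr
    if (h.created())                // just inserted: exclusive access
      h->size = size;
    // otherwise read access, held until h is destroyed
  }

  void OnFree(uptr addr) {
    Map::Handle h(&m, addr, true);  // entry is removed in the handle dtor
    // if (h.exists()), the value is still accessible here, exclusively
  }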

Modified:
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_addrhashmap.h
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_mutex.h

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_addrhashmap.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_addrhashmap.h?rev=204126&r1=204125&r2=204126&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_addrhashmap.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_addrhashmap.h Tue Mar 18 03:30:14 2014
@@ -22,7 +22,6 @@ namespace __sanitizer {

 // Concurrent uptr->T hashmap.
 // T must be a POD type, kSize is preferably a prime but can be any number.
-// The hashmap is fixed size, it crashes on overflow.
 // Usage example:
 //
 // typedef AddrHashMap<uptr, 11> Map;
@@ -44,11 +43,24 @@ template<typename T, uptr kSize>
 class AddrHashMap {
  private:
   struct Cell {
-    StaticSpinMutex mtx;
     atomic_uintptr_t addr;
     T val;
   };

+  struct AddBucket {
+    uptr cap;
+    uptr size;
+    Cell cells[1];  // variable len
+  };
+
+  static const uptr kBucketSize = 3;
+
+  struct Bucket {
+    RWMutex mtx;
+    atomic_uintptr_t add;
+    Cell cells[kBucketSize];
+  };
+
  public:
   AddrHashMap();

@@ -61,23 +73,23 @@ class AddrHashMap {
     bool exists() const;

    private:
+    friend AddrHashMap<T, kSize>;
     AddrHashMap<T, kSize> *map_;
+    Bucket *bucket_;
     Cell *cell_;
     uptr addr_;
+    uptr addidx_;
     bool created_;
     bool remove_;
   };

  private:
   friend class Handle;
-  Cell *table_;
-
-  static const uptr kLocked = 1;
-  static const uptr kRemoved = 2;
+  Bucket *table_;

-  Cell *acquire(uptr addr, bool remove, bool *created);
-  void release(uptr addr, bool remove, bool created, Cell *c);
-  uptr hash(uptr addr);
+  void acquire(Handle *h);
+  void release(Handle *h);
+  uptr calcHash(uptr addr);
 };

 template<typename T, uptr kSize>
@@ -86,12 +98,12 @@ AddrHashMap<T, kSize>::Handle::Handle(Ad
   map_ = map;
   addr_ = addr;
   remove_ = remove;
-  cell_ = map_->acquire(addr_, remove_, &created_);
+  map_->acquire(this);
 }

 template<typename T, uptr kSize>
 AddrHashMap<T, kSize>::Handle::~Handle() {
-  map_->release(addr_, remove_, created_, cell_);
+  map_->release(this);
 }

 template<typename T, uptr kSize>
@@ -111,96 +123,183 @@ bool AddrHashMap<T, kSize>::Handle::exis

 template<typename T, uptr kSize>
 AddrHashMap<T, kSize>::AddrHashMap() {
-  table_ = (Cell*)MmapOrDie(kSize * sizeof(Cell), "AddrHashMap");
+  table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
 }

 template<typename T, uptr kSize>
-typename AddrHashMap<T, kSize>::Cell *AddrHashMap<T, kSize>::acquire(uptr addr,
-    bool remove, bool *created) {
-  // When we access the element associated with addr,
-  // we lock its home cell (the cell associated with hash(addr).
-  // If the element was just created or is going to be removed,
-  // we lock the cell in write mode. Otherwise we lock in read mode.
-  // The locking protects the object lifetime (it's not removed while
-  // somebody else accesses it). And also it helps to resolve concurrent
-  // inserts.
-  // Note that the home cell is not necessary the cell where the element is
-  // stored.
-  *created = false;
-  uptr h0 = hash(addr);
-  Cell *c0 = &table_[h0];
+void AddrHashMap<T, kSize>::acquire(Handle *h) {
+  uptr addr = h->addr_;
+  uptr hash = calcHash(addr);
+  Bucket *b = &table_[hash];
+
+  h->created_ = false;
+  h->addidx_ = -1;
+  h->bucket_ = b;
+  h->cell_ = 0;
+
+  // If we want to remove the element, we need exclusive access to the bucket,
+  // so skip the lock-free phase.
+  if (h->remove_)
+    goto locked;
+
+ retry:
   // First try to find an existing element w/o read mutex.
-  {
-    uptr h = h0;
-    for (;;) {
-      Cell *c = &table_[h];
-      uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
-      if (addr1 == 0)  // empty cell denotes end of the cell chain for the elem
-        break;
-      // Locked cell means that another thread can be concurrently inserting
-      // the same element, fallback to mutex.
-      if (addr1 == kLocked)
-        break;
-      if (addr1 == addr)  // ok, found it
-        return c;
-      h++;
-      if (h == kSize)
-        h = 0;
-      CHECK_NE(h, h0);  // made the full cycle
+  CHECK(!h->remove_);
+  // Check the embed cells.
+  for (uptr i = 0; i < kBucketSize; i++) {
+    Cell *c = &b->cells[i];
+    uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
+    if (addr1 == addr) {
+      h->cell_ = c;
+      return;
    }
  }
-  if (remove)
-    return 0;
-  // Now try to create it under the mutex.
-  c0->mtx.Lock();
-  uptr h = h0;
-  for (;;) {
-    Cell *c = &table_[h];
-    uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
-    if (addr1 == addr) {  // another thread has inserted it ahead of us
-      c0->mtx.Unlock();
-      return c;
+
+  // Check the add cells with read lock.
+  if (atomic_load(&b->add, memory_order_relaxed)) {
+    b->mtx.ReadLock();
+    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+    for (uptr i = 0; i < add->size; i++) {
+      Cell *c = &add->cells[i];
+      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+      if (addr1 == addr) {
+        h->addidx_ = i;
+        h->cell_ = c;
+        return;
+      }
    }
-    // Skip kLocked, since we hold the home cell mutex, it can't be our elem.
-    if ((addr1 == 0 || addr1 == kRemoved) &&
-        atomic_compare_exchange_strong(&c->addr, &addr1, kLocked,
-                                       memory_order_acq_rel)) {
-      // we've created the element
-      *created = true;
-      return c;
+    b->mtx.ReadUnlock();
+  }
+
+ locked:
+  // Re-check existence under write lock.
+  // Embed cells.
+  b->mtx.Lock();
+  for (uptr i = 0; i < kBucketSize; i++) {
+    Cell *c = &b->cells[i];
+    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+    if (addr1 == addr) {
+      if (h->remove_) {
+        h->cell_ = c;
+        return;
+      }
+      b->mtx.Unlock();
+      goto retry;
    }
-    h++;
-    if (h == kSize)
-      h = 0;
-    CHECK_NE(h, h0);  // made the full cycle
  }
+
+  // Add cells.
+  AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+  if (add) {
+    for (uptr i = 0; i < add->size; i++) {
+      Cell *c = &add->cells[i];
+      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+      if (addr1 == addr) {
+        if (h->remove_) {
+          h->addidx_ = i;
+          h->cell_ = c;
+          return;
+        }
+        b->mtx.Unlock();
+        goto retry;
+      }
+    }
+  }
+
+  // The element does not exist, no need to create it if we want to remove.
+  if (h->remove_) {
+    b->mtx.Unlock();
+    return;
+  }
+
+  // Now try to create it under the mutex.
+  h->created_ = true;
+  // See if we have a free embed cell.
+  for (uptr i = 0; i < kBucketSize; i++) {
+    Cell *c = &b->cells[i];
+    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
+    if (addr1 == 0) {
+      h->cell_ = c;
+      return;
+    }
+  }
+
+  // Store in the add cells.
+  if (add == 0) {
+    // Allocate a new add array.
+    const uptr kInitSize = 64;
+    add = (AddBucket*)InternalAlloc(kInitSize);
+    add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
+    add->size = 0;
+    atomic_store(&b->add, (uptr)add, memory_order_relaxed);
+  }
+  if (add->size == add->cap) {
+    // Grow existing add array.
+    uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
+    uptr newsize = oldsize * 2;
+    AddBucket *add1 = (AddBucket*)InternalAlloc(oldsize * 2);
+    add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
+    add1->size = add->size;
+    internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
+    InternalFree(add);
+    atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
+    add = add1;
+  }
+  // Store.
+  uptr i = add->size++;
+  Cell *c = &add->cells[i];
+  h->addidx_ = i;
+  h->cell_ = c;
 }

 template<typename T, uptr kSize>
-void AddrHashMap<T, kSize>::release(uptr addr, bool remove, bool created,
-                                    Cell *c) {
-  if (c == 0)
+void AddrHashMap<T, kSize>::release(Handle *h) {
+  if (h->cell_ == 0)
     return;
-  // if we are going to remove, we must hold write lock
+  Bucket *b = h->bucket_;
+  Cell *c = h->cell_;
   uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
-  if (created) {
-    // denote completion of insertion
-    atomic_store(&c->addr, addr, memory_order_release);
-    // unlock the home cell
-    uptr h0 = hash(addr);
-    Cell *c0 = &table_[h0];
-    c0->mtx.Unlock();
-  } else {
-    CHECK_EQ(addr, addr1);
-    if (remove) {
-      // denote that the cell is empty now
-      atomic_store(&c->addr, kRemoved, memory_order_release);
+  if (h->created_) {
+    // Denote completion of insertion.
+    CHECK_EQ(addr1, 0);
+    // After the following store, the element becomes available
+    // for lock-free reads.
+    atomic_store(&c->addr, h->addr_, memory_order_release);
+    b->mtx.Unlock();
+  } else if (h->remove_) {
+    // Denote that the cell is empty now.
+    CHECK_EQ(addr1, h->addr_);
+    atomic_store(&c->addr, 0, memory_order_release);
+    // See if we need to compact the bucket.
+    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
+    if (h->addidx_ == -1) {
+      // Removed from embed array, move an add element into the freed cell.
+      if (add) {
+        uptr last = --add->size;
+        Cell *c1 = &add->cells[last];
+        c->val = c1->val;
+        uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
+        atomic_store(&c->addr, addr1, memory_order_release);
+      }
+    } else {
+      // Removed from add array, compact it.
+      uptr last = --add->size;
+      Cell *c1 = &add->cells[last];
+      *c = *c1;
+    }
+    if (add && add->size == 0) {
+      // FIXME(dvyukov): free add?
    }
+    b->mtx.Unlock();
+  } else {
+    CHECK_EQ(addr1, h->addr_);
+    if (h->addidx_ != -1)
+      b->mtx.ReadUnlock();
  }
 }

 template<typename T, uptr kSize>
-uptr AddrHashMap<T, kSize>::hash(uptr addr) {
+uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
   addr += addr << 10;
   addr ^= addr >> 6;
   return addr % kSize;

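To summarize the new layout: calcHash() picks one of kSize buckets; each bucket holds kBucketSize (3) embedded cells plus an optional heap-allocated overflow ("add") array. A lookup probes the embedded cells lock-free, then the add array under a read lock; only the insert and remove paths take the bucket's write lock. A simplified, synchronization-elided sketch of the probe order (Lookup is an illustrative helper, not in the patch):

  Cell *Lookup(Bucket *b, uptr addr) {
    // Phase 1: the three embedded cells -- the common, lock-free fast path.
    for (uptr i = 0; i < kBucketSize; i++)
      if (atomic_load(&b->cells[i].addr, memory_order_acquire) == addr)
        return &b->cells[i];
    // Phase 2: the overflow array (under b->mtx.ReadLock() in the real code).
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    if (add) {
      for (uptr i = 0; i < add->size; i++)
        if (atomic_load(&add->cells[i].addr, memory_order_relaxed) == addr)
          return &add->cells[i];
    }
    return 0;  // acquire() would then re-check and insert under b->mtx.Lock()
  }
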
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_mutex.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_mutex.h?rev=204126&r1=204125&r2=204126&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_mutex.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_mutex.h Tue Mar 18 03:30:14 2014
@@ -83,6 +83,88 @@ class BlockingMutex {
   uptr owner_;  // for debugging
 };

+// Reader-writer spin mutex.
+class RWMutex {
+ public:
+  RWMutex() {
+    atomic_store(&state_, kUnlocked, memory_order_relaxed);
+  }
+
+  ~RWMutex() {
+    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+  }
+
+  void Lock() {
+    u32 cmp = kUnlocked;
+    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
+                                       memory_order_acquire))
+      return;
+    LockSlow();
+  }
+
+  void Unlock() {
+    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
+    DCHECK_NE(prev & kWriteLock, 0);
+    (void)prev;
+  }
+
+  void ReadLock() {
+    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
+    if ((prev & kWriteLock) == 0)
+      return;
+    ReadLockSlow();
+  }
+
+  void ReadUnlock() {
+    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
+    DCHECK_EQ(prev & kWriteLock, 0);
+    DCHECK_GT(prev & ~kWriteLock, 0);
+    (void)prev;
+  }
+
+  void CheckLocked() {
+    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
+  }
+
+ private:
+  atomic_uint32_t state_;
+
+  enum {
+    kUnlocked = 0,
+    kWriteLock = 1,
+    kReadLock = 2
+  };
+
+  void NOINLINE LockSlow() {
+    for (int i = 0;; i++) {
+      if (i < 10)
+        proc_yield(10);
+      else
+        internal_sched_yield();
+      u32 cmp = atomic_load(&state_, memory_order_relaxed);
+      if (cmp == kUnlocked &&
+          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
+                                       memory_order_acquire))
+        return;
+    }
+  }
+
+  void NOINLINE ReadLockSlow() {
+    for (int i = 0;; i++) {
+      if (i < 10)
+        proc_yield(10);
+      else
+        internal_sched_yield();
+      u32 prev = atomic_load(&state_, memory_order_acquire);
+      if ((prev & kWriteLock) == 0)
+        return;
+    }
+  }
+
+  RWMutex(const RWMutex&);
+  void operator = (const RWMutex&);
+};
+
 template<typename MutexType>
 class GenericScopedLock {
  public:
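A note on the state_ encoding, as implied by the enum and the fetch_add/fetch_sub arithmetic above: bit 0 is the writer flag and each reader contributes kReadLock (2), so the whole lock fits in one u32:

  // state_ == 0          unlocked (kUnlocked)
  // state_ == 1          one writer (kWriteLock)
  // state_ == 2 * n      n readers (n * kReadLock)
  // state_ == 2 * n + 1  n readers arrived while a writer holds the lock;
  //                      they spin in ReadLockSlow() until bit 0 clears
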
@@ -123,6 +205,8 @@ class GenericScopedReadLock {

 typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
 typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
+typedef GenericScopedLock<RWMutex> RWMutexLock;
+typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;

 } // namespace __sanitizer
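
And a minimal usage sketch of the new scoped-lock typedefs (Stats is an illustrative struct, not part of the patch):

  struct Stats {
    RWMutex mtx;
    uptr hits;
  };

  uptr ReadHits(Stats *s) {
    RWMutexReadLock l(&s->mtx);  // shared: concurrent readers are allowed
    return s->hits;
  }

  void AddHit(Stats *s) {
    RWMutexLock l(&s->mtx);      // exclusive: blocks readers and writers
    s->hits++;
  }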

_______________________________________________
llvm-commits mailing list
llvm-commits@cs.uiuc.edu
http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits