[compiler-rt] r177312 - tsan: smaller memory block headers (32b->16b)

Dmitry Vyukov dvyukov at google.com
Mon Mar 18 12:47:36 PDT 2013


Author: dvyukov
Date: Mon Mar 18 14:47:36 2013
New Revision: 177312

URL: http://llvm.org/viewvc/llvm-project?rev=177312&view=rev
Log:
tsan: smaller memory block headers (32b->16b)


Modified:
    compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc
    compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc?rev=177312&r1=177311&r2=177312&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc Mon Mar 18 14:47:36 2013
@@ -29,6 +29,32 @@ extern "C" void WEAK __tsan_free_hook(vo
 
 namespace __tsan {
 
+COMPILER_CHECK(sizeof(MBlock) == 16);
+
+void MBlock::Lock() {
+  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
+  uptr v = atomic_load(a, memory_order_relaxed);
+  for (int iter = 0;; iter++) {
+    if (v & 1) {
+      if (iter < 10)
+        proc_yield(20);
+      else
+        internal_sched_yield();
+      v = atomic_load(a, memory_order_relaxed);
+      continue;
+    }
+    if (atomic_compare_exchange_weak(a, &v, v | 1, memory_order_acquire))
+      break;
+  }
+}
+
+void MBlock::Unlock() {
+  atomic_uintptr_t *a = reinterpret_cast<atomic_uintptr_t*>(this);
+  uptr v = atomic_load(a, memory_order_relaxed);
+  DCHECK(v & 1);
+  atomic_store(a, v & ~1, memory_order_relaxed);
+}
+
 struct MapUnmapCallback {
   void OnMap(uptr p, uptr size) const { }
   void OnUnmap(uptr p, uptr size) const {
@@ -79,13 +105,9 @@ void *user_alloc(ThreadState *thr, uptr
   if (p == 0)
     return 0;
   MBlock *b = new(allocator()->GetMetaData(p)) MBlock;
-  b->size = sz;
-  b->head = 0;
-  b->alloc_tid = thr->unique_id;
-  b->alloc_stack_id = CurrentStackId(thr, pc);
-  if (CTX() && CTX()->initialized) {
+  b->Init(sz, thr->tid, CurrentStackId(thr, pc));
+  if (CTX() && CTX()->initialized)
     MemoryRangeImitateWrite(thr, pc, (uptr)p, sz);
-  }
   DPrintf("#%d: alloc(%zu) = %p\n", thr->tid, sz, p);
   SignalUnsafeCall(thr, pc);
   return p;
@@ -96,9 +118,9 @@ void user_free(ThreadState *thr, uptr pc
   CHECK_NE(p, (void*)0);
   DPrintf("#%d: free(%p)\n", thr->tid, p);
   MBlock *b = (MBlock*)allocator()->GetMetaData(p);
-  if (b->head)   {
-    Lock l(&b->mtx);
-    for (SyncVar *s = b->head; s;) {
+  if (b->ListHead()) {
+    MBlock::ScopedLock l(b);
+    for (SyncVar *s = b->ListHead(); s;) {
       SyncVar *res = s;
       s = s->next;
       StatInc(thr, StatSyncDestroyed);
@@ -106,12 +128,10 @@ void user_free(ThreadState *thr, uptr pc
       res->mtx.Unlock();
       DestroyAndFree(res);
     }
-    b->head = 0;
+    b->ListReset();
   }
-  if (CTX() && CTX()->initialized && thr->in_rtl == 1) {
-    MemoryRangeFreed(thr, pc, (uptr)p, b->size);
-  }
-  b->~MBlock();
+  if (CTX() && CTX()->initialized && thr->in_rtl == 1)
+    MemoryRangeFreed(thr, pc, (uptr)p, b->Size());
   allocator()->Deallocate(&thr->alloc_cache, p);
   SignalUnsafeCall(thr, pc);
 }
@@ -127,12 +147,11 @@ void *user_realloc(ThreadState *thr, upt
       return 0;
     if (p) {
       MBlock *b = user_mblock(thr, p);
-      internal_memcpy(p2, p, min(b->size, sz));
+      internal_memcpy(p2, p, min(b->Size(), sz));
     }
   }
-  if (p) {
+  if (p)
     user_free(thr, pc, p);
-  }
   return p2;
 }
 
@@ -141,7 +160,7 @@ uptr user_alloc_usable_size(ThreadState
   if (p == 0)
     return 0;
   MBlock *b = (MBlock*)allocator()->GetMetaData(p);
-  return (b) ? b->size : 0;
+  return b ? b->Size() : 0;
 }
 
 MBlock *user_mblock(ThreadState *thr, void *p) {
@@ -232,7 +251,7 @@ uptr __tsan_get_allocated_size(void *p)
   if (p == 0)
     return 0;
   MBlock *b = (MBlock*)allocator()->GetMetaData(p);
-  return b->size;
+  return b->Size();
 }
 
 void __tsan_on_thread_idle() {

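The new MBlock::Lock/Unlock above turn bit 0 of the first header word into a spin lock: Lock backs off with proc_yield(20) for the first ten rounds and with internal_sched_yield() after that, then sets the bit with a weak CAS so the other packed bits are preserved. Below is a rough standalone sketch of the same protocol, written against std::atomic and std::this_thread::yield instead of the sanitizer_common primitives; TinyBlockLock is an illustrative name, not a tsan type.

// Standalone sketch of the bit-0 spin lock, assuming std::atomic semantics.
#include <atomic>
#include <cstdint>
#include <thread>

struct TinyBlockLock {
  std::atomic<uint64_t> word{0};  // bit 0 = lock bit, bits 1..63 = payload

  void Lock() {
    uint64_t v = word.load(std::memory_order_relaxed);
    for (int iter = 0;; iter++) {
      if (v & 1) {                    // already held: back off and retry
        if (iter >= 10)               // first ten rounds just re-read, where
          std::this_thread::yield();  // the patch calls proc_yield(20)
        v = word.load(std::memory_order_relaxed);
        continue;
      }
      // Weak CAS sets only bit 0 and leaves the packed payload untouched.
      if (word.compare_exchange_weak(v, v | 1, std::memory_order_acquire))
        break;
    }
  }

  void Unlock() {
    // The patch stores with relaxed ordering; a release store is the
    // conservative equivalent here.
    word.store(word.load(std::memory_order_relaxed) & ~uint64_t(1),
               std::memory_order_release);
  }
};
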
Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h?rev=177312&r1=177311&r2=177312&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h Mon Mar 18 14:47:36 2013
@@ -47,15 +47,73 @@ namespace __tsan {
 
 // Descriptor of user's memory block.
 struct MBlock {
-  Mutex mtx;
-  uptr size;
-  u32 alloc_tid;
-  u32 alloc_stack_id;
-  SyncVar *head;
+  /*
+  u64 mtx : 1;  // must be first
+  u64 lst : 44;
+  u64 stk : 31;  // on word boundary
+  u64 tid : kTidBits;
+  u64 siz : 128 - 1 - 31 - 44 - kTidBits;  // 39
+  */
+  u64 raw[2];
 
-  MBlock()
-    : mtx(MutexTypeMBlock, StatMtxMBlock) {
+  void Init(uptr siz, u32 tid, u32 stk) {
+    raw[0] = raw[1] = 0;
+    raw[1] |= (u64)siz << ((1 + 44 + 31 + kTidBits) % 64);
+    raw[1] |= (u64)tid << ((1 + 44 + 31) % 64);
+    raw[0] |= (u64)stk << (1 + 44);
+    raw[1] |= (u64)stk >> (64 - 44 - 1);
+    DCHECK_EQ(Size(), siz);
+    DCHECK_EQ(Tid(), tid);
+    DCHECK_EQ(StackId(), stk);
   }
+
+  u32 Tid() const {
+    return GetLsb(raw[1] >> ((1 + 44 + 31) % 64), kTidBits);
+  }
+
+  uptr Size() const {
+    return raw[1] >> ((1 + 31 + 44 + kTidBits) % 64);
+  }
+
+  u32 StackId() const {
+    return (raw[0] >> (1 + 44)) | GetLsb(raw[1] << (64 - 44 - 1), 31);
+  }
+
+  SyncVar *ListHead() const {
+    return (SyncVar*)(GetLsb(raw[0] >> 1, 44) << 3);
+  }
+
+  void ListPush(SyncVar *v) {
+    SyncVar *lst = ListHead();
+    v->next = lst;
+    u64 x = (u64)v ^ (u64)lst;
+    x = (x >> 3) << 1;
+    raw[0] ^= x;
+    DCHECK_EQ(ListHead(), v);
+  }
+
+  SyncVar *ListPop() {
+    SyncVar *lst = ListHead();
+    SyncVar *nxt = lst->next;
+    lst->next = 0;
+    u64 x = (u64)lst ^ (u64)nxt;
+    x = (x >> 3) << 1;
+    raw[0] ^= x;
+    DCHECK_EQ(ListHead(), nxt);
+    return lst;
+  }
+
+  void ListReset() {
+    SyncVar *lst = ListHead();
+    u64 x = (u64)lst;
+    x = (x >> 3) << 1;
+    raw[0] ^= x;
+    DCHECK_EQ(ListHead(), 0);
+  }
+
+  void Lock();
+  void Unlock();
+  typedef GenericScopedLock<MBlock> ScopedLock;
 };
 
 #ifndef TSAN_GO

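The commented-out bitfield at the top of the new MBlock documents the packing that Init/Tid/Size/StackId implement by hand: bit 0 of raw[0] is the lock bit, bits 1..44 hold the SyncVar list head shifted right by 3 (SyncVar pointers are 8-byte aligned), the 31-bit allocation stack id straddles the two words, and the thread id (kTidBits wide) and 39-bit size fill the rest of raw[1]. This is presumably also why user_alloc now records thr->tid rather than thr->unique_id: the packed field is only kTidBits wide, and the report path switches to the new FindThreadByTidLocked to match. A small self-contained sketch of the same pack/unpack arithmetic, assuming kTidBits == 13 (PackedBlock and main are illustrative only):

// Self-contained illustration of the two-word packing; assumes kTidBits == 13.
#include <cassert>
#include <cstdint>
#include <cstdio>

const int kTidBits = 13;  // assumed value; tsan defines this elsewhere

struct PackedBlock {
  // 128-bit layout:
  //   bit  0        lock bit ("mtx", must stay in the LSB)
  //   bits 1..44    SyncVar list head >> 3
  //   bits 45..75   allocation stack id (31 bits, straddles the two words)
  //   bits 76..88   allocating thread id (kTidBits)
  //   bits 89..127  block size (39 bits)
  uint64_t raw[2];

  void Init(uint64_t siz, uint32_t tid, uint32_t stk) {
    raw[0] = raw[1] = 0;
    raw[1] |= siz << ((1 + 44 + 31 + kTidBits) % 64);  // size -> raw[1] bits 25..63
    raw[1] |= (uint64_t)tid << ((1 + 44 + 31) % 64);   // tid  -> raw[1] bits 12..24
    raw[0] |= (uint64_t)stk << (1 + 44);  // low 19 bits of stk; the rest shifts out
    raw[1] |= (uint64_t)stk >> (64 - 44 - 1);          // high 12 bits of stk
  }

  uint64_t Size() const { return raw[1] >> ((1 + 44 + 31 + kTidBits) % 64); }

  uint32_t Tid() const {
    return (uint32_t)((raw[1] >> ((1 + 44 + 31) % 64)) & ((1u << kTidBits) - 1));
  }

  uint32_t StackId() const {
    uint64_t lo = raw[0] >> (1 + 44);                 // 19 low bits from word 0
    uint64_t hi = (raw[1] & ((1u << 12) - 1)) << 19;  // 12 high bits from word 1
    return (uint32_t)(lo | hi);
  }
};

int main() {
  PackedBlock b;
  b.Init(/*siz=*/4096, /*tid=*/7, /*stk=*/0x1234567);
  assert(sizeof(b) == 16);
  assert(b.Size() == 4096 && b.Tid() == 7 && b.StackId() == 0x1234567);
  printf("size=%llu tid=%u stk=%#x\n",
         (unsigned long long)b.Size(), b.Tid(), b.StackId());
  return 0;
}
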
Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc?rev=177312&r1=177311&r2=177312&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_report.cc Mon Mar 18 14:47:36 2013
@@ -216,6 +216,13 @@ static ThreadContext *FindThreadByUidLoc
   return 0;
 }
 
+static ThreadContext *FindThreadByTidLocked(int tid) {
+  Context *ctx = CTX();
+  ctx->thread_registry->CheckLocked();
+  return static_cast<ThreadContext*>(
+      ctx->thread_registry->GetThreadLocked(tid));
+}
+
 static bool IsInStackOrTls(ThreadContextBase *tctx_base, void *arg) {
   uptr addr = (uptr)arg;
   ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
@@ -306,20 +313,20 @@ void ScopedReport::AddLocation(uptr addr
   }
   if (allocator()->PointerIsMine((void*)addr)) {
     MBlock *b = user_mblock(0, (void*)addr);
-    ThreadContext *tctx = FindThreadByUidLocked(b->alloc_tid);
+    ThreadContext *tctx = FindThreadByTidLocked(b->Tid());
     void *mem = internal_alloc(MBlockReportLoc, sizeof(ReportLocation));
     ReportLocation *loc = new(mem) ReportLocation();
     rep_->locs.PushBack(loc);
     loc->type = ReportLocationHeap;
     loc->addr = (uptr)allocator()->GetBlockBegin((void*)addr);
-    loc->size = b->size;
-    loc->tid = tctx ? tctx->tid : b->alloc_tid;
+    loc->size = b->Size();
+    loc->tid = tctx ? tctx->tid : b->Tid();
     loc->name = 0;
     loc->file = 0;
     loc->line = 0;
     loc->stack = 0;
     uptr ssz = 0;
-    const uptr *stack = StackDepotGet(b->alloc_stack_id, &ssz);
+    const uptr *stack = StackDepotGet(b->StackId(), &ssz);
     if (stack) {
       StackTrace trace;
       trace.Init(stack, ssz);

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc?rev=177312&r1=177311&r2=177312&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_sync.cc Mon Mar 18 14:47:36 2013
@@ -82,9 +82,9 @@ SyncVar* SyncTab::GetAndLock(ThreadState
   // the hashmap anyway.
   if (PrimaryAllocator::PointerIsMine((void*)addr)) {
     MBlock *b = user_mblock(thr, (void*)addr);
-    Lock l(&b->mtx);
+    MBlock::ScopedLock l(b);
     SyncVar *res = 0;
-    for (res = b->head; res; res = res->next) {
+    for (res = b->ListHead(); res; res = res->next) {
       if (res->addr == addr)
         break;
     }
@@ -92,8 +92,7 @@ SyncVar* SyncTab::GetAndLock(ThreadState
       if (!create)
         return 0;
       res = Create(thr, pc, addr);
-      res->next = b->head;
-      b->head = res;
+      b->ListPush(res);
     }
     if (write_lock)
       res->mtx.Lock();
@@ -149,25 +148,34 @@ SyncVar* SyncTab::GetAndRemove(ThreadSta
     MBlock *b = user_mblock(thr, (void*)addr);
     SyncVar *res = 0;
     {
-      Lock l(&b->mtx);
-      SyncVar **prev = &b->head;
-      res = *prev;
-      while (res) {
+      MBlock::ScopedLock l(b);
+      res = b->ListHead();
+      if (res) {
         if (res->addr == addr) {
           if (res->is_linker_init)
             return 0;
-          *prev = res->next;
-          break;
+          b->ListPop();
+        } else {
+          SyncVar **prev = &res->next;
+          res = *prev;
+          while (res) {
+            if (res->addr == addr) {
+              if (res->is_linker_init)
+                return 0;
+              *prev = res->next;
+              break;
+            }
+            prev = &res->next;
+            res = *prev;
+          }
+        }
+        if (res) {
+          StatInc(thr, StatSyncDestroyed);
+          res->mtx.Lock();
+          res->mtx.Unlock();
         }
-        prev = &res->next;
-        res = *prev;
       }
     }
-    if (res) {
-      StatInc(thr, StatSyncDestroyed);
-      res->mtx.Lock();
-      res->mtx.Unlock();
-    }
     return res;
   }
 #endif

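In tsan_sync.cc, GetAndLock now threads newly created SyncVars onto the per-block list with ListPush, and GetAndRemove unlinks a matching head element with ListPop (non-head elements are still unlinked through their predecessor's next pointer). ListPush/ListPop/ListReset all update the 44-bit head field by XOR-ing the old and new pointer values into raw[0], which works because SyncVar objects are 8-byte aligned and the field sits at bits 1..44, just above the lock bit. A minimal sketch of that XOR update, assuming 64-bit user-space pointers that fit in 44 + 3 bits (Node and PackedList are made-up names):

// Illustrative sketch of the XOR-based head update on a packed word.
#include <cassert>
#include <cstdint>

struct Node { Node *next = nullptr; };

struct PackedList {
  uint64_t raw = 0;  // bit 0 = lock bit, bits 1..44 = (head pointer >> 3)

  Node *Head() const {
    return (Node *)(((raw >> 1) & ((1ull << 44) - 1)) << 3);
  }

  // Swap the stored head from `cur` to `nxt`: XOR-ing (cur ^ nxt) >> 3 << 1
  // flips exactly the bits that differ between the two encoded pointers, so
  // bit 0 (the lock) and any bits above the field are never disturbed.
  void SetHead(Node *cur, Node *nxt) {
    uint64_t x = (uint64_t)cur ^ (uint64_t)nxt;
    raw ^= (x >> 3) << 1;
  }

  void Push(Node *v) {
    Node *old = Head();
    v->next = old;
    SetHead(old, v);
  }

  Node *Pop() {
    Node *old = Head();
    SetHead(old, old->next);
    old->next = nullptr;
    return old;
  }
};

int main() {
  Node a, b;
  PackedList l;
  l.Push(&a);
  l.Push(&b);
  assert(l.Head() == &b);
  assert(l.Pop() == &b && l.Head() == &a);
  assert(l.Pop() == &a && l.Head() == nullptr);
  return 0;
}
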
Modified: compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc?rev=177312&r1=177311&r2=177312&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc (original)
+++ compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc Mon Mar 18 14:47:36 2013
@@ -55,10 +55,10 @@ TEST(Mman, User) {
   EXPECT_NE(p2, p);
   MBlock *b = user_mblock(thr, p);
   EXPECT_NE(b, (MBlock*)0);
-  EXPECT_EQ(b->size, (uptr)10);
+  EXPECT_EQ(b->Size(), (uptr)10);
   MBlock *b2 = user_mblock(thr, p2);
   EXPECT_NE(b2, (MBlock*)0);
-  EXPECT_EQ(b2->size, (uptr)20);
+  EXPECT_EQ(b2->Size(), (uptr)20);
   for (int i = 0; i < 10; i++) {
     p[i] = 42;
     EXPECT_EQ(b, user_mblock(thr, p + i));




