[llvm-commits] [compiler-rt] r173332 - in /compiler-rt/trunk/lib: sanitizer_common/sanitizer_allocator.h sanitizer_common/tests/sanitizer_allocator_test.cc tsan/rtl/tsan_mman.cc tsan/rtl/tsan_mman.h tsan/rtl/tsan_rtl.cc tsan/rtl/tsan_rtl_thread.cc tsan/tests/unit/tsan_mman_test.cc

Dmitry Vyukov dvyukov at google.com
Thu Jan 24 01:08:04 PST 2013


Author: dvyukov
Date: Thu Jan 24 03:08:03 2013
New Revision: 173332

URL: http://llvm.org/viewvc/llvm-project?rev=173332&view=rev
Log:
tsan: implement malloc stats querying

Modified:
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
    compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_mman.h
    compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc
    compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_thread.cc
    compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc
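
For context, a minimal usage sketch of the query interface this commit fills in (not part of the commit itself; the size_t declarations stand in for uptr, which is size_t-compatible on the supported platforms):

  #include <cstdio>

  extern "C" size_t __tsan_get_current_allocated_bytes();
  extern "C" size_t __tsan_get_heap_size();

  void ReportHeapUsage() {
    // "allocated" is Malloced - Freed, "heap" is Mmapped - Unmapped,
    // both aggregated across all thread caches (see tsan_mman.cc below).
    printf("tsan heap: %zu bytes allocated, %zu bytes mapped\n",
           __tsan_get_current_allocated_bytes(), __tsan_get_heap_size());
  }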

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h?rev=173332&r1=173331&r2=173332&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h Thu Jan 24 03:08:03 2013
@@ -180,6 +180,86 @@
     CompactSizeClassMap;
 template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
 
+// Memory allocator statistics
+enum AllocatorStat {
+  AllocatorStatMalloced,
+  AllocatorStatFreed,
+  AllocatorStatMmapped,
+  AllocatorStatUnmapped,
+  AllocatorStatCount
+};
+
+typedef u64 AllocatorStatCounters[AllocatorStatCount];
+
+// Per-thread stats, live in per-thread cache.
+class AllocatorStats {
+ public:
+  void Init() {
+    internal_memset(this, 0, sizeof(*this));
+  }
+
+  void Add(AllocatorStat i, u64 v) {
+    v += atomic_load(&stats_[i], memory_order_relaxed);
+    atomic_store(&stats_[i], v, memory_order_relaxed);
+  }
+
+  void Set(AllocatorStat i, u64 v) {
+    atomic_store(&stats_[i], v, memory_order_relaxed);
+  }
+
+  u64 Get(AllocatorStat i) const {
+    return atomic_load(&stats_[i], memory_order_relaxed);
+  }
+
+ private:
+  friend class AllocatorGlobalStats;
+  AllocatorStats *next_;
+  AllocatorStats *prev_;
+  atomic_uint64_t stats_[AllocatorStatCount];
+};
+
+// Global stats, used for aggregation and querying.
+class AllocatorGlobalStats : public AllocatorStats {
+ public:
+  void Init() {
+    internal_memset(this, 0, sizeof(*this));
+    next_ = this;
+    prev_ = this;
+  }
+
+  void Register(AllocatorStats *s) {
+    SpinMutexLock l(&mu_);
+    s->next_ = next_;
+    s->prev_ = this;
+    next_->prev_ = s;
+    next_ = s;
+  }
+
+  void Unregister(AllocatorStats *s) {
+    SpinMutexLock l(&mu_);
+    s->prev_->next_ = s->next_;
+    s->next_->prev_ = s->prev_;
+    for (int i = 0; i < AllocatorStatCount; i++)
+      Add(AllocatorStat(i), s->Get(AllocatorStat(i)));
+  }
+
+  void Get(AllocatorStatCounters s) const {
+    internal_memset(s, 0, AllocatorStatCount * sizeof(u64));
+    SpinMutexLock l(&mu_);
+    const AllocatorStats *stats = this;
+    for (;;) {
+      for (int i = 0; i < AllocatorStatCount; i++)
+        s[i] += stats->Get(AllocatorStat(i));
+      stats = stats->next_;
+      if (stats == this)
+        break;
+    }
+  }
+
+ private:
+  mutable SpinMutex mu_;
+};
+
 // Allocators call these callbacks on mmap/munmap.
 struct NoOpMapUnmapCallback {
   void OnMap(uptr p, uptr size) const { }
@@ -233,17 +313,18 @@
       alignment <= SizeClassMap::kMaxSize;
   }
 
-  Batch *NOINLINE AllocateBatch(AllocatorCache *c, uptr class_id) {
+  Batch *NOINLINE AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
+                                uptr class_id) {
     CHECK_LT(class_id, kNumClasses);
     RegionInfo *region = GetRegionInfo(class_id);
     Batch *b = region->free_list.Pop();
     if (b == 0)
-      b = PopulateFreeList(c, class_id, region);
+      b = PopulateFreeList(stat, c, class_id, region);
     region->n_allocated += b->count;
     return b;
   }
 
-  void NOINLINE DeallocateBatch(uptr class_id, Batch *b) {
+  void NOINLINE DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
     RegionInfo *region = GetRegionInfo(class_id);
     region->free_list.Push(b);
     region->n_freed += b->count;
@@ -370,8 +451,8 @@
     return offset / (u32)size;
   }
 
-  Batch *NOINLINE PopulateFreeList(AllocatorCache *c, uptr class_id,
-                                   RegionInfo *region) {
+  Batch *NOINLINE PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+                                   uptr class_id, RegionInfo *region) {
     BlockingMutexLock l(&region->mutex);
     Batch *b = region->free_list.Pop();
     if (b)
@@ -388,6 +469,7 @@
         map_size += kUserMapSize;
       CHECK_GE(region->mapped_user + map_size, end_idx);
       MapWithCallback(region_beg + region->mapped_user, map_size);
+      stat->Add(AllocatorStatMmapped, map_size);
       region->mapped_user += map_size;
     }
     uptr total_count = (region->mapped_user - beg_idx - size)
@@ -469,6 +551,7 @@
     MapUnmapCallback().OnMap((uptr)res, size);
     return res;
   }
+
   void UnmapWithCallback(uptr beg, uptr size) {
     MapUnmapCallback().OnUnmap(beg, size);
     UnmapOrDie(reinterpret_cast<void *>(beg), size);
@@ -490,19 +573,20 @@
     return reinterpret_cast<void*>(meta);
   }
 
-  Batch *NOINLINE AllocateBatch(AllocatorCache *c, uptr class_id) {
+  Batch *NOINLINE AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
+                                uptr class_id) {
     CHECK_LT(class_id, kNumClasses);
     SizeClassInfo *sci = GetSizeClassInfo(class_id);
     SpinMutexLock l(&sci->mutex);
     if (sci->free_list.empty())
-      PopulateFreeList(c, sci, class_id);
+      PopulateFreeList(stat, c, sci, class_id);
     CHECK(!sci->free_list.empty());
     Batch *b = sci->free_list.front();
     sci->free_list.pop_front();
     return b;
   }
 
-  void NOINLINE DeallocateBatch(uptr class_id, Batch *b) {
+  void NOINLINE DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
     CHECK_LT(class_id, kNumClasses);
     SizeClassInfo *sci = GetSizeClassInfo(class_id);
     SpinMutexLock l(&sci->mutex);
@@ -579,11 +663,12 @@
     return mem & ~(kRegionSize - 1);
   }
 
-  uptr AllocateRegion(uptr class_id) {
+  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
     CHECK_LT(class_id, kNumClasses);
     uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
                                       "SizeClassAllocator32"));
     MapUnmapCallback().OnMap(res, kRegionSize);
+    stat->Add(AllocatorStatMmapped, kRegionSize);
     CHECK_EQ(0U, (res & (kRegionSize - 1)));
     CHECK_EQ(0U, state_->possible_regions[ComputeRegionId(res)]);
     state_->possible_regions[ComputeRegionId(res)] = class_id;
@@ -595,9 +680,10 @@
     return &state_->size_class_info_array[class_id];
   }
 
-  void PopulateFreeList(AllocatorCache *c, SizeClassInfo *sci, uptr class_id) {
+  void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
+                        SizeClassInfo *sci, uptr class_id) {
     uptr size = SizeClassMap::Size(class_id);
-    uptr reg = AllocateRegion(class_id);
+    uptr reg = AllocateRegion(stat, class_id);
     uptr n_chunks = kRegionSize / (size + kMetadataSize);
     uptr max_count = SizeClassMap::MaxCached(class_id);
     Batch *b = 0;
@@ -634,14 +720,22 @@
   typedef SizeClassAllocator Allocator;
   static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
 
-  // Don't need to call Init if the object is a global (i.e. zero-initialized).
-  void Init() {
-    internal_memset(this, 0, sizeof(*this));
+  void Init(AllocatorGlobalStats *s) {
+    stats_.Init();
+    if (s)
+      s->Register(&stats_);
+  }
+
+  void Destroy(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
+    Drain(allocator);
+    if (s)
+      s->Unregister(&stats_);
   }
 
   void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
+    stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id));
     PerClass *c = &per_class_[class_id];
     if (UNLIKELY(c->count == 0))
       Refill(allocator, class_id);
@@ -653,6 +747,7 @@
   void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
     CHECK_NE(class_id, 0UL);
     CHECK_LT(class_id, kNumClasses);
+    stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
     PerClass *c = &per_class_[class_id];
     if (UNLIKELY(c->count == c->max_count))
       Drain(allocator, class_id);
@@ -676,6 +771,7 @@
     void *batch[2 * SizeClassMap::kMaxNumCached];
   };
   PerClass per_class_[kNumClasses];
+  AllocatorStats stats_;
 
   void InitCache() {
     if (per_class_[0].max_count)
@@ -689,7 +785,8 @@
   void NOINLINE Refill(SizeClassAllocator *allocator, uptr class_id) {
     InitCache();
     PerClass *c = &per_class_[class_id];
-    Batch *b = allocator->AllocateBatch(this, class_id);
+    Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
+    CHECK_GT(b->count, 0);
     for (uptr i = 0; i < b->count; i++)
       c->batch[i] = b->batch[i];
     c->count = b->count;
@@ -712,7 +809,7 @@
     }
     b->count = cnt;
     c->count -= cnt;
-    allocator->DeallocateBatch(class_id, b);
+    allocator->DeallocateBatch(&stats_, class_id, b);
   }
 };
 
@@ -727,7 +824,7 @@
     page_size_ = GetPageSizeCached();
   }
 
-  void *Allocate(uptr size, uptr alignment) {
+  void *Allocate(AllocatorStats *stat, uptr size, uptr alignment) {
     CHECK(IsPowerOfTwo(alignment));
     uptr map_size = RoundUpMapSize(size);
     if (alignment > page_size_)
@@ -758,11 +855,13 @@
       stats.currently_allocated += map_size;
       stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
       stats.by_size_log[size_log]++;
+      stat->Add(AllocatorStatMalloced, map_size);
+      stat->Add(AllocatorStatMmapped, map_size);
     }
     return reinterpret_cast<void*>(res);
   }
 
-  void Deallocate(void *p) {
+  void Deallocate(AllocatorStats *stat, void *p) {
     Header *h = GetHeader(p);
     {
       SpinMutexLock l(&mutex_);
@@ -774,6 +873,8 @@
       n_chunks_--;
       stats.n_frees++;
       stats.currently_allocated -= h->map_size;
+      stat->Add(AllocatorStatFreed, h->map_size);
+      stat->Add(AllocatorStatUnmapped, h->map_size);
     }
     MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
     UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
@@ -886,6 +987,7 @@
   void Init() {
     primary_.Init();
     secondary_.Init();
+    stats_.Init();
   }
 
   void *Allocate(AllocatorCache *cache, uptr size, uptr alignment,
@@ -901,7 +1003,7 @@
     if (primary_.CanAllocate(size, alignment))
       res = cache->Allocate(&primary_, primary_.ClassID(size));
     else
-      res = secondary_.Allocate(size, alignment);
+      res = secondary_.Allocate(&stats_, size, alignment);
     if (alignment > 8)
       CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
     if (cleared && res)
@@ -914,7 +1016,7 @@
     if (primary_.PointerIsMine(p))
       cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
     else
-      secondary_.Deallocate(p);
+      secondary_.Deallocate(&stats_, p);
   }
 
   void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
@@ -969,10 +1071,22 @@
 
   void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
 
+  void InitCache(AllocatorCache *cache) {
+    cache->Init(&stats_);
+  }
+
+  void DestroyCache(AllocatorCache *cache) {
+    cache->Destroy(&primary_, &stats_);
+  }
+
   void SwallowCache(AllocatorCache *cache) {
     cache->Drain(&primary_);
   }
 
+  void GetStats(AllocatorStatCounters s) const {
+    stats_.Get(s);
+  }
+
   void PrintStats() {
     primary_.PrintStats();
     secondary_.PrintStats();
@@ -981,6 +1095,7 @@
  private:
   PrimaryAllocator primary_;
   SecondaryAllocator secondary_;
+  AllocatorGlobalStats stats_;
 };
 
 }  // namespace __sanitizer
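
Design note on the stats machinery above: every thread cache owns an AllocatorStats node on a circular doubly-linked list headed by AllocatorGlobalStats; Get() walks the ring under the spin mutex and sums each counter, and Unregister() folds a departing node's counters into the head so totals survive thread exit. Add() is a relaxed load-plus-store rather than an atomic RMW, which is safe because each node is only ever written by its owning thread. A standalone simplified model of the ring, using std:: primitives in place of the sanitizer-internal ones:

  #include <atomic>
  #include <mutex>

  struct Stats {
    Stats *next = nullptr, *prev = nullptr;
    std::atomic<unsigned long long> malloced{0}, freed{0};
  };

  struct GlobalStats : Stats {
    std::mutex mu;
    GlobalStats() { next = prev = this; }  // ring of one: the head itself

    void Register(Stats *s) {
      std::lock_guard<std::mutex> l(mu);
      s->next = next;
      s->prev = this;
      next->prev = s;
      next = s;
    }

    void Unregister(Stats *s) {
      std::lock_guard<std::mutex> l(mu);
      s->prev->next = s->next;
      s->next->prev = s->prev;
      // Fold the departing node's counters into the head so the
      // totals are not lost when a thread exits.
      malloced += s->malloced;
      freed += s->freed;
    }

    unsigned long long CurrentlyAllocated() {
      std::lock_guard<std::mutex> l(mu);
      unsigned long long m = 0, f = 0;
      for (Stats *p = this;;) {  // sum the head plus every live node
        m += p->malloced;
        f += p->freed;
        p = p->next;
        if (p == this) break;
      }
      return m >= f ? m - f : 0;
    }
  };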

Modified: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc?rev=173332&r1=173331&r2=173332&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc Thu Jan 24 03:08:03 2013
@@ -63,7 +63,8 @@
   Allocator *a = new Allocator;
   a->Init();
   SizeClassAllocatorLocalCache<Allocator> cache;
-  cache.Init();
+  memset(&cache, 0, sizeof(cache));
+  cache.Init(0);
 
   static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
     50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
@@ -137,7 +138,8 @@
   Allocator *a = new Allocator;
   a->Init();
   SizeClassAllocatorLocalCache<Allocator> cache;
-  cache.Init();
+  memset(&cache, 0, sizeof(cache));
+  cache.Init(0);
   static volatile void *sink;
 
   const uptr kNumAllocs = 10000;
@@ -191,8 +193,11 @@
   a->Init();
   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
   SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
-  cache.Init();
-  a->AllocateBatch(&cache, 64);
+  memset(&cache, 0, sizeof(cache));
+  cache.Init(0);
+  AllocatorStats stats;
+  stats.Init();
+  a->AllocateBatch(&stats, &cache, 64);
   EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
   a->TestOnlyUnmap();
   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
@@ -210,8 +215,11 @@
   a->Init();
   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
   SizeClassAllocatorLocalCache<Allocator32WithCallBack>  cache;
-  cache.Init();
-  a->AllocateBatch(&cache, 64);
+  memset(&cache, 0, sizeof(cache));
+  cache.Init(0);
+  AllocatorStats stats;
+  stats.Init();
+  a->AllocateBatch(&stats, &cache, 64);
   EXPECT_EQ(TestMapUnmapCallback::map_count, 2);  // alloc.
   a->TestOnlyUnmap();
   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 2);  // The whole thing + alloc.
@@ -226,9 +234,11 @@
   TestMapUnmapCallback::unmap_count = 0;
   LargeMmapAllocator<TestMapUnmapCallback> a;
   a.Init();
-  void *x = a.Allocate(1 << 20, 1);
+  AllocatorStats stats;
+  stats.Init();
+  void *x = a.Allocate(&stats, 1 << 20, 1);
   EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
-  a.Deallocate(x);
+  a.Deallocate(&stats, x);
   EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
 }
 
@@ -237,9 +247,12 @@
   Allocator a;
   a.Init();
   SizeClassAllocatorLocalCache<Allocator> cache;
-  cache.Init();
+  memset(&cache, 0, sizeof(cache));
+  cache.Init(0);
+  AllocatorStats stats;
+  stats.Init();
   for (int i = 0; i < 1000000; i++) {
-    a.AllocateBatch(&cache, 64);
+    a.AllocateBatch(&stats, &cache, 64);
   }
 
   a.TestOnlyUnmap();
@@ -254,13 +267,15 @@
 TEST(SanitizerCommon, LargeMmapAllocator) {
   LargeMmapAllocator<> a;
   a.Init();
+  AllocatorStats stats;
+  stats.Init();
 
   static const int kNumAllocs = 1000;
   char *allocated[kNumAllocs];
   static const uptr size = 4000;
   // Allocate some.
   for (int i = 0; i < kNumAllocs; i++) {
-    allocated[i] = (char *)a.Allocate(size, 1);
+    allocated[i] = (char *)a.Allocate(&stats, size, 1);
     CHECK(a.PointerIsMine(allocated[i]));
   }
   // Deallocate all.
@@ -268,14 +283,14 @@
   for (int i = 0; i < kNumAllocs; i++) {
     char *p = allocated[i];
     CHECK(a.PointerIsMine(p));
-    a.Deallocate(p);
+    a.Deallocate(&stats, p);
   }
   // Check that none are left.
   CHECK_EQ(a.TotalMemoryUsed(), 0);
 
   // Allocate some more, also add metadata.
   for (int i = 0; i < kNumAllocs; i++) {
-    char *x = (char *)a.Allocate(size, 1);
+    char *x = (char *)a.Allocate(&stats, size, 1);
     CHECK_GE(a.GetActuallyAllocatedSize(x), size);
     uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
     *meta = i;
@@ -294,7 +309,7 @@
     uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
     CHECK_EQ(*meta, idx);
     CHECK(a.PointerIsMine(p));
-    a.Deallocate(p);
+    a.Deallocate(&stats, p);
   }
   CHECK_EQ(a.TotalMemoryUsed(), 0);
 
@@ -304,7 +319,7 @@
     const uptr kNumAlignedAllocs = 100;
     for (uptr i = 0; i < kNumAlignedAllocs; i++) {
       uptr size = ((i % 10) + 1) * 4096;
-      char *p = allocated[i] = (char *)a.Allocate(size, alignment);
+      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
       CHECK_EQ(p, a.GetBlockBegin(p));
       CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
       CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
@@ -312,7 +327,7 @@
       p[0] = p[size - 1] = 0;
     }
     for (uptr i = 0; i < kNumAlignedAllocs; i++) {
-      a.Deallocate(allocated[i]);
+      a.Deallocate(&stats, allocated[i]);
     }
   }
 }
@@ -327,7 +342,8 @@
   a->Init();
 
   AllocatorCache cache;
-  cache.Init();
+  memset(&cache, 0, sizeof(cache));
+  a->InitCache(&cache);
 
   EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
   EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
@@ -363,6 +379,7 @@
     allocated.clear();
     a->SwallowCache(&cache);
   }
+  a->DestroyCache(&cache);
   a->TestOnlyUnmap();
 }
 
@@ -388,14 +405,13 @@
 
 template <class AllocatorCache>
 void TestSizeClassAllocatorLocalCache() {
-  static AllocatorCache static_allocator_cache;
-  static_allocator_cache.Init();
   AllocatorCache cache;
   typedef typename AllocatorCache::Allocator Allocator;
   Allocator *a = new Allocator();
 
   a->Init();
-  cache.Init();
+  memset(&cache, 0, sizeof(cache));
+  cache.Init(0);
 
   const uptr kNumAllocs = 10000;
   const int kNumIter = 100;
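
The test changes track the new cache lifecycle: a cache must now be zeroed explicitly, Init() takes an optional AllocatorGlobalStats registry (0 to opt out), and Destroy() drains the cache and unregisters its stats. Through CombinedAllocator this becomes InitCache/DestroyCache/GetStats. A sketch in the style of the tests above, where Allocator stands for a CombinedAllocator instantiation like the one in the combined-allocator test (hypothetical test name):

  TEST(SanitizerCommon, CombinedAllocatorStatsSketch) {
    Allocator *a = new Allocator;
    a->Init();
    AllocatorCache cache;
    memset(&cache, 0, sizeof(cache));
    a->InitCache(&cache);            // registers the cache's stats globally
    void *p = a->Allocate(&cache, 128, 8);
    a->Deallocate(&cache, p);
    AllocatorStatCounters s;
    a->GetStats(s);                  // sums global stats + all live caches
    EXPECT_GE(s[AllocatorStatMalloced], s[AllocatorStatFreed]);
    a->DestroyCache(&cache);         // Drain + Unregister
    a->TestOnlyUnmap();
    delete a;
  }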

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc?rev=173332&r1=173331&r2=173332&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc Thu Jan 24 03:08:03 2013
@@ -38,8 +38,16 @@
   allocator()->Init();
 }
 
-void AlloctorThreadFinish(ThreadState *thr) {
-  allocator()->SwallowCache(&thr->alloc_cache);
+void AllocatorThreadStart(ThreadState *thr) {
+  allocator()->InitCache(&thr->alloc_cache);
+}
+
+void AllocatorThreadFinish(ThreadState *thr) {
+  allocator()->DestroyCache(&thr->alloc_cache);
+}
+
+void AllocatorPrintStats() {
+  allocator()->PrintStats();
 }
 
 static void SignalUnsafeCall(ThreadState *thr, uptr pc) {
@@ -169,30 +177,44 @@
 
 extern "C" {
 uptr __tsan_get_current_allocated_bytes() {
-  return 0;
+  u64 stats[AllocatorStatCount];
+  allocator()->GetStats(stats);
+  u64 m = stats[AllocatorStatMalloced];
+  u64 f = stats[AllocatorStatFreed];
+  return m >= f ? m - f : 1;
 }
 
 uptr __tsan_get_heap_size() {
-  return 1;
+  u64 stats[AllocatorStatCount];
+  allocator()->GetStats(stats);
+  u64 m = stats[AllocatorStatMmapped];
+  u64 f = stats[AllocatorStatUnmapped];
+  return m >= f ? m - f : 1;
 }
 
 uptr __tsan_get_free_bytes() {
-  return 1;
+  return 0;
 }
 
 uptr __tsan_get_unmapped_bytes() {
-  return 1;
+  return 0;
 }
 
 uptr __tsan_get_estimated_allocated_size(uptr size) {
   return size;
 }
 
-bool __tsan_get_ownership(const void *p) {
-  return true;
+bool __tsan_get_ownership(void *p) {
+  return allocator()->GetBlockBegin(p) != 0;
 }
 
-uptr __tsan_get_allocated_size(const void *p) {
-  return 0;
+uptr __tsan_get_allocated_size(void *p) {
+  if (p == 0)
+    return 0;
+  p = allocator()->GetBlockBegin(p);
+  if (p == 0)
+    return 0;
+  MBlock *b = (MBlock*)allocator()->GetMetaData(p);
+  return b->size;
 }
 }  // extern "C"
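
Two properties of the new implementations are worth calling out: the byte counters clamp to 1 instead of underflowing when per-thread counters are momentarily out of sync (the `m >= f ? m - f : 1` guard), and the ownership/size queries work for interior pointers because GetBlockBegin() resolves them to the chunk start. A behavior sketch (thr and pc are placeholders for the caller's context):

  char *p = (char*)user_alloc(thr, pc, 10);  // lands in a 16-byte class
  __tsan_get_ownership(p);        // true: GetBlockBegin(p) != 0
  __tsan_get_ownership(p + 5);    // also true: interior pointer resolves
  __tsan_get_allocated_size(p);   // 10: MBlock::size is the user size
  __tsan_get_allocated_size(0);   // 0 for null
  user_free(thr, pc, p);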

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_mman.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_mman.h?rev=173332&r1=173331&r2=173332&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_mman.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_mman.h Thu Jan 24 03:08:03 2013
@@ -20,7 +20,9 @@
 const uptr kDefaultAlignment = 16;
 
 void InitializeAllocator();
-void AlloctorThreadFinish(ThreadState *thr);
+void AllocatorThreadStart(ThreadState *thr);
+void AllocatorThreadFinish(ThreadState *thr);
+void AllocatorPrintStats();
 
 // For user allocations.
 void *user_alloc(ThreadState *thr, uptr pc, uptr sz,

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc?rev=173332&r1=173331&r2=173332&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.cc Thu Jan 24 03:08:03 2013
@@ -255,6 +255,11 @@
   ctx->report_mtx.Lock();
   ctx->report_mtx.Unlock();
 
+#ifndef TSAN_GO
+  if (ctx->flags.verbosity)
+    AllocatorPrintStats();
+#endif
+
   ThreadFinalize(thr);
 
   if (ctx->nreported) {

Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_thread.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_thread.cc?rev=173332&r1=173331&r2=173332&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_thread.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_rtl_thread.cc Thu Jan 24 03:08:03 2013
@@ -209,6 +209,9 @@
   thr->shadow_stack_pos = thr->shadow_stack;
   thr->shadow_stack_end = thr->shadow_stack + kInitStackSize;
 #endif
+#ifndef TSAN_GO
+  AllocatorThreadStart(thr);
+#endif
   tctx->thr = thr;
   thr->fast_synch_epoch = tctx->epoch0;
   thr->clock.set(tid, tctx->epoch0);
@@ -269,7 +272,7 @@
   tctx->epoch1 = thr->fast_state.epoch();
 
 #ifndef TSAN_GO
-  AlloctorThreadFinish(thr);
+  AllocatorThreadFinish(thr);
 #endif
   thr->~ThreadState();
   StatAggregate(ctx->stat, thr->stat);
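
With these hooks in place, each ThreadState's alloc_cache gets the full lifecycle; schematically:

  // Thread creation:
  //   AllocatorThreadStart(thr)
  //     -> allocator()->InitCache(&thr->alloc_cache)     // zero + Register
  // Thread teardown:
  //   AllocatorThreadFinish(thr)
  //     -> allocator()->DestroyCache(&thr->alloc_cache)  // Drain + Unregister,
  //        folding the thread's counters into the global stats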

Modified: compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc?rev=173332&r1=173331&r2=173332&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc (original)
+++ compiler-rt/trunk/lib/tsan/tests/unit/tsan_mman_test.cc Thu Jan 24 03:08:03 2013
@@ -14,6 +14,16 @@
 #include "tsan_rtl.h"
 #include "gtest/gtest.h"
 
+extern "C" {
+uptr __tsan_get_current_allocated_bytes();
+uptr __tsan_get_heap_size();
+uptr __tsan_get_free_bytes();
+uptr __tsan_get_unmapped_bytes();
+uptr __tsan_get_estimated_allocated_size(uptr size);
+bool __tsan_get_ownership(void *p);
+uptr __tsan_get_allocated_size(void *p);
+}
+
 namespace __tsan {
 
 TEST(Mman, Internal) {
@@ -106,4 +116,33 @@
   }
 }
 
+TEST(Mman, Stats) {
+  ScopedInRtl in_rtl;
+  ThreadState *thr = cur_thread();
+
+  uptr alloc0 = __tsan_get_current_allocated_bytes();
+  uptr heap0 = __tsan_get_heap_size();
+  uptr free0 = __tsan_get_free_bytes();
+  uptr unmapped0 = __tsan_get_unmapped_bytes();
+
+  EXPECT_EQ(__tsan_get_estimated_allocated_size(10), (uptr)10);
+  EXPECT_EQ(__tsan_get_estimated_allocated_size(20), (uptr)20);
+  EXPECT_EQ(__tsan_get_estimated_allocated_size(100), (uptr)100);
+
+  char *p = (char*)user_alloc(thr, 0, 10);
+  EXPECT_EQ(__tsan_get_ownership(p), true);
+  EXPECT_EQ(__tsan_get_allocated_size(p), (uptr)10);
+
+  EXPECT_EQ(__tsan_get_current_allocated_bytes(), alloc0 + 16);
+  EXPECT_GE(__tsan_get_heap_size(), heap0);
+  EXPECT_EQ(__tsan_get_free_bytes(), free0);
+  EXPECT_EQ(__tsan_get_unmapped_bytes(), unmapped0);
+
+  user_free(thr, 0, p);
+
+  EXPECT_EQ(__tsan_get_current_allocated_bytes(), alloc0);
+  EXPECT_GE(__tsan_get_heap_size(), heap0);
+  EXPECT_EQ(__tsan_get_free_bytes(), free0);
+  EXPECT_EQ(__tsan_get_unmapped_bytes(), unmapped0);
+}
 }  // namespace __tsan
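
One detail the new Stats test encodes: a 10-byte user allocation is charged to the stats at its size class, not at the requested size, because the cache records SizeClassMap::Size(class_id); the smallest class covering 10 bytes is 16 bytes, hence alloc0 + 16. The user-visible size is kept separately:

  char *p = (char*)user_alloc(thr, 0, 10);
  // AllocatorStatMalloced grows by 16 (the size class), while
  // MBlock::size stores 10, so __tsan_get_allocated_size(p) == 10.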
