[llvm-commits] [compiler-rt] r159823 - in /compiler-rt/trunk/lib/sanitizer_common: sanitizer_allocator64.h tests/sanitizer_allocator64_test.cc

Kostya Serebryany kcc at google.com
Fri Jul 6 06:46:49 PDT 2012


Author: kcc
Date: Fri Jul  6 08:46:49 2012
New Revision: 159823

URL: http://llvm.org/viewvc/llvm-project?rev=159823&view=rev
Log:
[tsan] implement SizeClassAllocatorLocalCache (part of tsan allocator)
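
In short, each thread keeps its own per-class free lists and only touches
the shared allocator (and its per-region SpinMutex) on refill or drain.
A minimal usage sketch, assuming the names introduced in the diff below
(the wrapper functions my_alloc/my_free and the concrete template
arguments are illustrative, not part of this commit):

  typedef SizeClassAllocator64<0x600000000000ULL, 0x10000000000ULL, 16,
                               DefaultSizeClassMap> PrimaryAllocator;
  typedef SizeClassAllocatorLocalCache<PrimaryAllocator::kNumClasses,
                                       PrimaryAllocator> AllocatorCache;
  // One POD cache per thread; zero-initialized TLS needs no Init() call.
  static THREADLOCAL AllocatorCache cache;

  void *my_alloc(PrimaryAllocator *a, uptr class_id) {
    // Fast path pops from the thread-local list; on a miss, BulkAllocate
    // refills the whole list under the region mutex.
    return cache.Allocate(a, class_id);
  }

  void my_free(PrimaryAllocator *a, uptr class_id, void *p) {
    // No locking here: push onto the thread-local list. Memory goes back
    // to the shared allocator only via cache.Drain(a).
    cache.Deallocate(a, class_id, p);
  }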

Modified:
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h
    compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h?rev=159823&r1=159822&r2=159823&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h Fri Jul  6 08:46:49 2012
@@ -79,6 +79,13 @@
   }
 };
 
+struct AllocatorListNode {
+  AllocatorListNode *next;
+};
+
+typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
+
+
 // Space: a portion of address space of kSpaceSize bytes starting at
 // a fixed address (kSpaceBeg). Both constants are powers of two and
 // kSpaceBeg is kSpaceSize-aligned.
@@ -114,6 +121,30 @@
     CHECK(PointerIsMine(p));
     DeallocateBySizeClass(p, GetSizeClass(p));
   }
+
+  // Allocate several chunks of the given class_id.
+  void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
+    CHECK_LT(class_id, kNumClasses);
+    RegionInfo *region = GetRegionInfo(class_id);
+    SpinMutexLock l(&region->mutex);
+    if (region->free_list.empty()) {
+      PopulateFreeList(class_id, region);
+    }
+    CHECK(!region->free_list.empty());
+    // Just take as many chunks as we have in the free list now.
+    // FIXME: this might be too much.
+    free_list->append_front(&region->free_list);
+    CHECK(region->free_list.empty());
+  }
+
+  // Swallow the entire free_list for the given class_id.
+  void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
+    CHECK_LT(class_id, kNumClasses);
+    RegionInfo *region = GetRegionInfo(class_id);
+    SpinMutexLock l(&region->mutex);
+    region->free_list.append_front(free_list);
+  }
+
   bool PointerIsMine(void *p) {
     return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
   }
@@ -140,8 +171,9 @@
     UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
   }
 
- private:
   static const uptr kNumClasses = 256;  // Power of two <= 256
+
+ private:
   COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
   static const uptr kRegionSize = kSpaceSize / kNumClasses;
   COMPILER_CHECK((kRegionSize >> 32) > 0);  // kRegionSize must be >= 2^32.
@@ -149,18 +181,12 @@
   // or with one element if its size is greater.
   static const uptr kPopulateSize = 1 << 18;
 
-  struct ListNode {
-    ListNode *next;
-  };
-
-  typedef IntrusiveList<ListNode> FreeList;
-
   struct RegionInfo {
     SpinMutex mutex;
-    FreeList free_list;
+    AllocatorFreeList free_list;
     uptr allocated_user;  // Bytes allocated for user memory.
     uptr allocated_meta;  // Bytes allocated for metadata.
-    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(FreeList)];
+    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
   };
   COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);
 
@@ -196,7 +222,7 @@
     uptr i = 0;
     do {  // do-while loop because we need to put at least one item.
       uptr p = region_beg + idx;
-      region->free_list.push_front(reinterpret_cast<ListNode*>(p));
+      region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
       idx += size;
       i++;
     } while (idx < end_idx);
@@ -213,7 +239,7 @@
       PopulateFreeList(class_id, region);
     }
     CHECK(!region->free_list.empty());
-    ListNode *node = region->free_list.front();
+    AllocatorListNode *node = region->free_list.front();
     region->free_list.pop_front();
     return reinterpret_cast<void*>(node);
   }
@@ -221,8 +247,45 @@
   void DeallocateBySizeClass(void *p, uptr class_id) {
     RegionInfo *region = GetRegionInfo(class_id);
     SpinMutexLock l(&region->mutex);
-    region->free_list.push_front(reinterpret_cast<ListNode*>(p));
+    region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+  }
+};
+
+// Objects of this type should be used as local caches for SizeClassAllocator64.
+// Since the typical use of this class is to have one object per thread in TLS,
+// it has to be POD.
+template<const uptr kNumClasses, class SizeClassAllocator>
+struct SizeClassAllocatorLocalCache {
+  // Don't need to call Init if the object is a global (i.e. zero-initialized).
+  void Init() {
+    internal_memset(this, 0, sizeof(*this));
+  }
+
+  void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
+    CHECK_LT(class_id, kNumClasses);
+    AllocatorFreeList *free_list = &free_lists_[class_id];
+    if (free_list->empty())
+      allocator->BulkAllocate(class_id, free_list);
+    CHECK(!free_list->empty());
+    void *res = free_list->front();
+    free_list->pop_front();
+    return res;
+  }
+
+  void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
+    CHECK_LT(class_id, kNumClasses);
+    free_lists_[class_id].push_front(reinterpret_cast<AllocatorListNode*>(p));
   }
+
+  void Drain(SizeClassAllocator *allocator) {
+    for (uptr i = 0; i < kNumClasses; i++) {
+      allocator->BulkDeallocate(i, &free_lists_[i]);
+      CHECK(free_lists_[i].empty());
+    }
+  }
+
+  // private:
+  AllocatorFreeList free_lists_[kNumClasses];
 };
 
 // This class can (de)allocate only large chunks of memory using mmap/unmap.
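
The two bulk operations move whole lists, not individual chunks:
BulkAllocate hands the region's entire free list to the caller, and
BulkDeallocate splices the thread's list back, each under a single
SpinMutexLock. The O(1) splice looks roughly like this (a sketch of an
assumed singly-linked representation; the real IntrusiveList in
sanitizer_common may differ in detail):

  template<class Item>
  struct ListSketch {  // hypothetical stand-in for IntrusiveList
    Item *first, *last;
    uptr size;
    bool empty() const { return size == 0; }
    // Splice all of *l onto the front of this list; *l becomes empty.
    void append_front(ListSketch *l) {
      if (l->empty()) return;
      if (empty()) {
        *this = *l;
      } else {
        l->last->next = first;  // link donor's tail to our old head
        first = l->first;
        size += l->size;
      }
      l->first = l->last = 0;
      l->size = 0;
    }
  };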

Modified: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc?rev=159823&r1=159822&r2=159823&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc Fri Jul  6 08:46:49 2012
@@ -14,9 +14,14 @@
 #include <algorithm>
 #include <vector>
 
-TEST(SanitizerCommon, DefaultSizeClassMap) {
-  typedef DefaultSizeClassMap SCMap;
+static const uptr kAllocatorSpace = 0x600000000000ULL;
+static const uptr kAllocatorSize = 0x10000000000;  // 1T.
+
+typedef DefaultSizeClassMap SCMap;
+typedef
+  SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 16, SCMap> Allocator;
 
+TEST(SanitizerCommon, DefaultSizeClassMap) {
 #if 0
   for (uptr i = 0; i < SCMap::kNumClasses; i++) {
     // printf("% 3ld: % 5ld (%4lx);   ", i, SCMap::Size(i), SCMap::Size(i));
@@ -47,14 +52,7 @@
   }
 }
 
-static const uptr kAllocatorSpace = 0x600000000000ULL;
-static const uptr kAllocatorSize = 0x10000000000;  // 1T.
-
 TEST(SanitizerCommon, SizeClassAllocator64) {
-  typedef DefaultSizeClassMap SCMap;
-  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
-                               16, SCMap> Allocator;
-
   Allocator a;
   a.Init();
 
@@ -101,9 +99,6 @@
 
 
 TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
-  typedef DefaultSizeClassMap SCMap;
-  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
-          16, SCMap> Allocator;
   Allocator a;
   a.Init();
   static volatile void *sink;
@@ -128,9 +123,6 @@
 }
 
 void FailInAssertionOnOOM() {
-  typedef DefaultSizeClassMap SCMap;
-  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
-          16, SCMap> Allocator;
   Allocator a;
   a.Init();
   const uptr size = 1 << 20;
@@ -188,9 +180,7 @@
 }
 
 TEST(SanitizerCommon, CombinedAllocator) {
-  typedef DefaultSizeClassMap SCMap;
-  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
-          16, SCMap> PrimaryAllocator;
+  typedef Allocator PrimaryAllocator;
   typedef LargeMmapAllocator SecondaryAllocator;
   typedef CombinedAllocator<PrimaryAllocator, SecondaryAllocator> Allocator;
 
@@ -225,3 +215,37 @@
   }
   a.TestOnlyUnmap();
 }
+
+typedef SizeClassAllocatorLocalCache<Allocator::kNumClasses, Allocator>
+  AllocatorCache;
+static THREADLOCAL AllocatorCache static_allocator_cache;
+
+TEST(SanitizerCommon, SizeClassAllocatorLocalCache) {
+  static_allocator_cache.Init();
+
+  Allocator a;
+  AllocatorCache cache;
+
+  a.Init();
+  cache.Init();
+
+  const uptr kNumAllocs = 10000;
+  const int kNumIter = 100;
+  uptr saved_total = 0;
+  for (int i = 0; i < kNumIter; i++) {
+    void *allocated[kNumAllocs];
+    for (uptr i = 0; i < kNumAllocs; i++) {
+      allocated[i] = cache.Allocate(&a, 0);
+    }
+    for (uptr i = 0; i < kNumAllocs; i++) {
+      cache.Deallocate(&a, 0, allocated[i]);
+    }
+    cache.Drain(&a);
+    uptr total_allocated = a.TotalMemoryUsed();
+    if (saved_total)
+      CHECK_EQ(saved_total, total_allocated);
+    saved_total = total_allocated;
+  }
+
+  a.TestOnlyUnmap();
+}
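
As a cross-check on the constants used above: kAllocatorSize =
0x10000000000 = 2^40 bytes (1 TiB), and with kNumClasses = 256 = 2^8 the
allocator carves the space into regions of kRegionSize = 2^40 / 2^8 =
2^32 bytes, which satisfies COMPILER_CHECK((kRegionSize >> 32) > 0) in
the header. The same arithmetic as a one-line sanity check (illustrative
only):

  COMPILER_CHECK((0x10000000000ULL / 256) == (1ULL << 32));  // kRegionSize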
