[llvm-commits] [compiler-rt] r159204 - in /compiler-rt/trunk/lib/sanitizer_common: sanitizer_allocator64.h tests/sanitizer_allocator64_test.cc

Kostya Serebryany kcc at google.com
Tue Jun 26 07:23:32 PDT 2012


Author: kcc
Date: Tue Jun 26 09:23:32 2012
New Revision: 159204

URL: http://llvm.org/viewvc/llvm-project?rev=159204&view=rev
Log:
[tsan] added LargeMmapAllocator, a part of the new tsan allocator

Modified:
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h
    compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h?rev=159204&r1=159203&r2=159204&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h Tue Jun 26 09:23:32 2012
@@ -19,6 +19,7 @@
 
 #include "sanitizer_common.h"
 #include "sanitizer_internal_defs.h"
+#include "sanitizer_libc.h"
 
 namespace __sanitizer {
 
@@ -113,11 +114,11 @@
     return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
   }
 
-  uptr GetMetaData(void *p) {
+  void *GetMetaData(void *p) {
     uptr class_id = GetSizeClass(p);
     uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), class_id);
-    return kSpaceBeg + (kRegionSize * (class_id + 1)) -
-        (1 + chunk_idx) * kMetadataSize;
+    return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
+                                   (1 + chunk_idx) * kMetadataSize);
   }
 
   uptr TotalMemoryUsed() {
@@ -226,6 +227,97 @@
   }
 };
 
+// This class can (de)allocate only large chunks of memory using mmap/unmap.
+// The main purpose of this allocator is to cover large and rare allocation
+// sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
+// The result is always page-aligned.
+class LargeMmapAllocator {
+ public:
+  void Init() {
+    internal_memset(this, 0, sizeof(*this));
+  }
+  void *Allocate(uptr size) {
+    uptr map_size = RoundUpMapSize(size);
+    void *map = MmapOrDie(map_size, "LargeMmapAllocator");
+    void *res = reinterpret_cast<void*>(reinterpret_cast<uptr>(map)
+                                        + kPageSize);
+    Header *h = GetHeader(res);
+    h->size = size;
+    {
+      // FIXME: lock
+      h->next = list_;
+      h->prev = 0;
+      if (list_)
+        list_->prev = h;
+      list_ = h;
+    }
+    return res;
+  }
+
+  void Deallocate(void *p) {
+    Header *h = GetHeader(p);
+    uptr map_size = RoundUpMapSize(h->size);
+    {
+      // FIXME: lock
+      Header *prev = h->prev;
+      Header *next = h->next;
+      if (prev)
+        prev->next = next;
+      if (next)
+        next->prev = prev;
+      if (h == list_)
+        list_ = next;
+    }
+    UnmapOrDie(h, map_size);
+  }
+
+  uptr TotalMemoryUsed() {
+    // FIXME: lock
+    uptr res = 0;
+    for (Header *l = list_; l; l = l->next) {
+      res += RoundUpMapSize(l->size);
+    }
+    return res;
+  }
+
+  bool PointerIsMine(void *p) {
+    // Fast check.
+    if ((reinterpret_cast<uptr>(p) % kPageSize) != 0) return false;
+    // FIXME: lock
+    for (Header *l = list_; l; l = l->next) {
+      if (GetUser(l) == p) return true;
+    }
+    return false;
+  }
+
+  // At least kPageSize/2 metadata bytes are available.
+  void *GetMetaData(void *p) {
+    return GetHeader(p) + 1;
+  }
+
+ private:
+  struct Header {
+    uptr size;
+    Header *next;
+    Header *prev;
+  };
+
+  Header *GetHeader(void *p) {
+    return reinterpret_cast<Header*>(reinterpret_cast<uptr>(p) - kPageSize);
+  }
+
+  void *GetUser(Header *h) {
+    return reinterpret_cast<void*>(reinterpret_cast<uptr>(h) + kPageSize);
+  }
+
+  uptr RoundUpMapSize(uptr size) {
+    return RoundUpTo(size, kPageSize) + kPageSize;
+  }
+
+  Header *list_;
+  uptr lock_;  // FIXME
+};
+
 }  // namespace __sanitizer
 
 #endif  // SANITIZER_ALLOCATOR_H

Modified: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc?rev=159204&r1=159203&r2=159204&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc Tue Jun 26 09:23:32 2012
@@ -106,7 +106,7 @@
           16, SCMap> Allocator;
   Allocator a;
   a.Init();
-  static volatile uptr sink;
+  static volatile void *sink;
 
   const uptr kNumAllocs = 10000;
   void *allocated[kNumAllocs];
@@ -144,3 +144,44 @@
   EXPECT_DEATH(FailInAssertionOnOOM(),
                "allocated_user.*allocated_meta.*kRegionSize");
 }
+
+TEST(SanitizerCommon, LargeMmapAllocator) {
+  LargeMmapAllocator a;
+  a.Init();
+
+  static const int kNumAllocs = 100;
+  void *allocated[kNumAllocs];
+  static const uptr size = 1000;
+  // Allocate some.
+  for (int i = 0; i < kNumAllocs; i++) {
+    allocated[i] = a.Allocate(size);
+  }
+  // Deallocate all.
+  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
+  for (int i = 0; i < kNumAllocs; i++) {
+    void *p = allocated[i];
+    CHECK(a.PointerIsMine(p));
+    a.Deallocate(p);
+  }
+  // Check that none are left.
+  CHECK_EQ(a.TotalMemoryUsed(), 0);
+
+  // Allocate some more, also add metadata.
+  for (int i = 0; i < kNumAllocs; i++) {
+    void *x = a.Allocate(size);
+    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
+    *meta = i;
+    allocated[i] = x;
+  }
+  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
+  // Deallocate all in reverse order.
+  for (int i = 0; i < kNumAllocs; i++) {
+    int idx = kNumAllocs - i - 1;
+    void *p = allocated[idx];
+    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
+    CHECK_EQ(*meta, idx);
+    CHECK(a.PointerIsMine(p));
+    a.Deallocate(p);
+  }
+  CHECK_EQ(a.TotalMemoryUsed(), 0);
+}





More information about the llvm-commits mailing list