[llvm-commits] [compiler-rt] r169496 - in /compiler-rt/trunk/lib/sanitizer_common: sanitizer_allocator.h tests/sanitizer_allocator_test.cc
Kostya Serebryany
kcc at google.com
Thu Dec 6 04:49:29 PST 2012
Author: kcc
Date: Thu Dec 6 06:49:28 2012
New Revision: 169496
URL: http://llvm.org/viewvc/llvm-project?rev=169496&view=rev
Log:
[asan/msan] new 32-bit allocator, basic functionality so far
Modified:
compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h?rev=169496&r1=169495&r2=169496&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h Thu Dec 6 06:49:28 2012
@@ -128,6 +128,7 @@
}
void *Allocate(uptr size, uptr alignment) {
+ if (size < alignment) size = alignment;
CHECK(CanAllocate(size, alignment));
return AllocateBySizeClass(SizeClassMap::ClassID(size));
}
@@ -181,7 +182,7 @@
uptr chunk_idx = GetChunkIdx((uptr)p, size);
uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
uptr begin = reg_beg + chunk_idx * size;
- return (void*)begin;
+ return reinterpret_cast<void*>(begin);
}
static uptr GetActuallyAllocatedSize(void *p) {
@@ -220,7 +221,6 @@
private:
static const uptr kRegionSize = kSpaceSize / kNumClasses;
COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
- COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
// kRegionSize must be >= 2^32.
COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
// Populate the free list with at most this number of bytes at once
@@ -258,10 +258,10 @@
}
void PopulateFreeList(uptr class_id, RegionInfo *region) {
+ CHECK(region->free_list.empty());
uptr size = SizeClassMap::Size(class_id);
uptr beg_idx = region->allocated_user;
uptr end_idx = beg_idx + kPopulateSize;
- region->free_list.clear();
uptr region_beg = kSpaceBeg + kRegionSize * class_id;
uptr idx = beg_idx;
uptr i = 0;
@@ -301,6 +301,161 @@
}
};
+// SizeClassAllocator32 -- allocator for 32-bit address space.
+// This allocator can theoretically be used on a 64-bit arch, but there it is less
+// efficient than SizeClassAllocator64.
+//
+// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
+// be returned by MmapOrDie().
+//
+// Region:
+// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+// Since the regions are aligned by kRegionSize, there are exactly
+// kNumPossibleRegions possible regions in the address space and so we keep
+// a u8 array possible_regions_[kNumPossibleRegions] to store the size classes.
+// 0 size class means the region is not used by the allocator.
+//
+// One Region is used to allocate chunks of a single size class.
+// A Region looks like this:
+// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
+//
+// In order to avoid false sharing, the objects of this class should be
+// cache-line aligned.
+template <const uptr kSpaceBeg, const u64 kSpaceSize,
+ const uptr kMetadataSize, class SizeClassMap>
+class SizeClassAllocator32 {
+ public:
+ // Don't need to call Init if the object is a global (i.e. zero-initialized).
+ void Init() {
+ internal_memset(this, 0, sizeof(*this));
+ }
+
+ bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ void *Allocate(uptr size, uptr alignment) {
+ if (size < alignment) size = alignment;
+ CHECK(CanAllocate(size, alignment));
+ return AllocateBySizeClass(SizeClassMap::ClassID(size));
+ }
+
+ void Deallocate(void *p) {
+ CHECK(PointerIsMine(p));
+ DeallocateBySizeClass(p, GetSizeClass(p));
+ }
+
+ void *GetMetaData(void *p) {
+ CHECK(PointerIsMine(p));
+ uptr mem = reinterpret_cast<uptr>(p);
+ uptr beg = ComputeRegionBeg(mem);
+ uptr size = SizeClassMap::Size(GetSizeClass(p));
+ u32 offset = mem - beg;
+ uptr n = offset / (u32)size; // 32-bit division
+ uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
+ return (void*)meta;
+ }
+
+ bool PointerIsMine(void *p) {
+ return possible_regions_[ComputeRegionId(reinterpret_cast<uptr>(p))] != 0;
+ }
+
+ uptr GetSizeClass(void *p) {
+ return possible_regions_[ComputeRegionId(reinterpret_cast<uptr>(p))] - 1;
+ }
+
+ uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return SizeClassMap::Size(GetSizeClass(p));
+ }
+
+ uptr TotalMemoryUsed() {
+ // No need to lock here.
+ uptr res = 0;
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (possible_regions_[i])
+ res += kRegionSize;
+ return res;
+ }
+
+ void TestOnlyUnmap() {
+ for (uptr i = 0; i < kNumPossibleRegions; i++)
+ if (possible_regions_[i])
+ UnmapOrDie(reinterpret_cast<void*>(i * kRegionSize), kRegionSize);
+ }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses; // 2^k <= 128
+ private:
+ static const uptr kRegionSizeLog = SANITIZER_WORDSIZE == 64 ? 24 : 20;
+ static const uptr kRegionSize = 1 << kRegionSizeLog;
+ static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;
+ COMPILER_CHECK(kNumClasses <= 128);
+
+ struct SizeClassInfo {
+ SpinMutex mutex;
+ AllocatorFreeList free_list;
+ char padding[kCacheLineSize - sizeof(uptr) - sizeof(AllocatorFreeList)];
+ };
+ COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
+
+ uptr ComputeRegionId(uptr mem) {
+ uptr res = mem >> kRegionSizeLog;
+ CHECK_LT(res, kNumPossibleRegions);
+ return res;
+ }
+
+ uptr ComputeRegionBeg(uptr mem) {
+ return mem & ~(kRegionSize - 1);
+ }
+
+ uptr AllocateRegion(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+ "SizeClassAllocator32"));
+ CHECK_EQ(0U, (res & (kRegionSize - 1)));
+ CHECK_EQ(0U, possible_regions_[ComputeRegionId(res)]);
+ possible_regions_[ComputeRegionId(res)] = class_id + 1;
+ return res;
+ }
+
+ SizeClassInfo *GetSizeClassInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ return &size_class_info_array_[class_id];
+ }
+
+ void EnsureSizeClassHasAvailableChunks(SizeClassInfo *sci, uptr class_id) {
+ if (!sci->free_list.empty()) return;
+ uptr size = SizeClassMap::Size(class_id);
+ uptr reg = AllocateRegion(class_id);
+ uptr n_chunks = kRegionSize / (size + kMetadataSize);
+ for (uptr i = reg; i < reg + n_chunks * size; i += size)
+ sci->free_list.push_back(reinterpret_cast<AllocatorListNode*>(i));
+ }
+
+ void *AllocateBySizeClass(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ EnsureSizeClassHasAvailableChunks(sci, class_id);
+ CHECK(!sci->free_list.empty());
+ AllocatorListNode *node = sci->free_list.front();
+ sci->free_list.pop_front();
+ return reinterpret_cast<void*>(node);
+ }
+
+ void DeallocateBySizeClass(void *p, uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ SizeClassInfo *sci = GetSizeClassInfo(class_id);
+ SpinMutexLock l(&sci->mutex);
+ sci->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+ }
+
+ u8 possible_regions_[kNumPossibleRegions];
+ SizeClassInfo size_class_info_array_[kNumClasses];
+};
+
// Objects of this type should be used as local caches for SizeClassAllocator64.
// Since the typical use of this class is to have one object per thread in TLS,
// it has to be POD.
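[Editorial note: the layout logic of the new SizeClassAllocator32 reduces to a few
lines of address arithmetic. Below is a minimal standalone sketch of that arithmetic,
assuming concrete values for the constants (kRegionSizeLog = 20 as in the 32-bit case
above, kMetadataSize = 16 as in the tests); ComputeRegionId and ComputeRegionBeg mirror
the committed code, while MetaAddress is a hypothetical helper restating GetMetaData.
This is an illustration, not the committed implementation.]

    #include <cassert>
    #include <cstdint>

    // Hypothetical stand-ins for the template/class constants (32-bit case).
    static const uintptr_t kRegionSizeLog = 20;             // 1M regions
    static const uintptr_t kRegionSize = 1UL << kRegionSizeLog;
    static const uintptr_t kMetadataSize = 16;

    // Every kRegionSize-aligned slot in the address space is a possible
    // region, so an address maps to a region id by its high bits alone.
    uintptr_t ComputeRegionId(uintptr_t mem) { return mem >> kRegionSizeLog; }

    // Round an address down to the start of its region.
    uintptr_t ComputeRegionBeg(uintptr_t mem) { return mem & ~(kRegionSize - 1); }

    // Metadata grows downward from the region end, mirroring the chunks:
    //   UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
    uintptr_t MetaAddress(uintptr_t p, uintptr_t chunk_size) {
      uintptr_t beg = ComputeRegionBeg(p);
      uintptr_t n = (p - beg) / chunk_size;  // index of the chunk in its region
      return (beg + kRegionSize) - (n + 1) * kMetadataSize;
    }

    int main() {
      uintptr_t region = 0x300000;  // hypothetical region-aligned address
      assert(ComputeRegionBeg(region + 123) == region);
      // Metadata of chunk 0 occupies the last kMetadataSize bytes of the region.
      assert(MetaAddress(region, 64) == region + kRegionSize - kMetadataSize);
      return 0;
    }

The <gap> in the picture is what keeps the two halves from colliding: with
n_chunks = kRegionSize / (size + kMetadataSize), e.g. size = 64, the user area takes
13107 * 64 = 838,848 bytes and the metadata 13107 * 16 = 209,712 bytes, summing to
1,048,560 <= 2^20.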
Modified: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc?rev=169496&r1=169495&r2=169496&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc Thu Dec 6 06:49:28 2012
@@ -23,14 +23,20 @@
#if SANITIZER_WORDSIZE == 64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize = 0x010000000000ULL; // 1T.
+static const u64 kAddressSpaceSize = 1ULL << 47;
typedef SizeClassAllocator64<
kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;
typedef SizeClassAllocator64<
kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
+#else
+static const u64 kAddressSpaceSize = 1ULL << 32;
#endif
+typedef SizeClassAllocator32<
+ 0, kAddressSpaceSize, 16, CompactSizeClassMap> Allocator32Compact;
+
template <class SizeClassMap>
void TestSizeClassMap() {
typedef SizeClassMap SCMap;
@@ -71,8 +77,8 @@
template <class Allocator>
void TestSizeClassAllocator() {
- Allocator a;
- a.Init();
+ Allocator *a = new Allocator;
+ a->Init();
static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
50000, 60000, 100000, 300000, 500000, 1000000, 2000000};
@@ -82,19 +88,19 @@
uptr last_total_allocated = 0;
for (int i = 0; i < 5; i++) {
// Allocate a bunch of chunks.
- for (uptr s = 0; s < sizeof(sizes) /sizeof(sizes[0]); s++) {
+ for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
uptr size = sizes[s];
- if (!a.CanAllocate(size, 1)) continue;
+ if (!a->CanAllocate(size, 1)) continue;
// printf("s = %ld\n", size);
uptr n_iter = std::max((uptr)2, 1000000 / size);
for (uptr i = 0; i < n_iter; i++) {
- void *x = a.Allocate(size, 1);
+ void *x = a->Allocate(size, 1);
allocated.push_back(x);
- CHECK(a.PointerIsMine(x));
- CHECK_GE(a.GetActuallyAllocatedSize(x), size);
- uptr class_id = a.GetSizeClass(x);
+ CHECK(a->PointerIsMine(x));
+ CHECK_GE(a->GetActuallyAllocatedSize(x), size);
+ uptr class_id = a->GetSizeClass(x);
CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
- uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
+ uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
metadata[0] = reinterpret_cast<uptr>(x) + 1;
metadata[1] = 0xABCD;
}
@@ -102,19 +108,20 @@
// Deallocate all.
for (uptr i = 0; i < allocated.size(); i++) {
void *x = allocated[i];
- uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
+ uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
CHECK_EQ(metadata[1], 0xABCD);
- a.Deallocate(x);
+ a->Deallocate(x);
}
allocated.clear();
- uptr total_allocated = a.TotalMemoryUsed();
+ uptr total_allocated = a->TotalMemoryUsed();
if (last_total_allocated == 0)
last_total_allocated = total_allocated;
CHECK_EQ(last_total_allocated, total_allocated);
}
- a.TestOnlyUnmap();
+ a->TestOnlyUnmap();
+ delete a;
}
#if SANITIZER_WORDSIZE == 64
@@ -127,6 +134,10 @@
}
#endif
+TEST(SanitizerCommon, SizeClassAllocator32Compact) {
+ TestSizeClassAllocator<Allocator32Compact>();
+}
+
template <class Allocator>
void SizeClassAllocator64MetadataStress() {
Allocator a;
@@ -181,7 +192,6 @@
#endif
TEST(SanitizerCommon, LargeMmapAllocator) {
- fprintf(stderr, "xxxx %ld\n", 0L);
LargeMmapAllocator a;
a.Init();
@@ -190,7 +200,6 @@
static const uptr size = 1000;
// Allocate some.
for (int i = 0; i < kNumAllocs; i++) {
- fprintf(stderr, "zzz0 %ld\n", size);
allocated[i] = a.Allocate(size, 1);
}
// Deallocate all.
@@ -205,7 +214,6 @@
// Allocate some more, also add metadata.
for (int i = 0; i < kNumAllocs; i++) {
- fprintf(stderr, "zzz1 %ld\n", size);
void *x = a.Allocate(size, 1);
CHECK_GE(a.GetActuallyAllocatedSize(x), size);
uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
@@ -227,7 +235,6 @@
for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
for (int i = 0; i < kNumAllocs; i++) {
uptr size = ((i % 10) + 1) * 4096;
- fprintf(stderr, "zzz1 %ld %ld\n", size, alignment);
allocated[i] = a.Allocate(size, alignment);
CHECK_EQ(0, (uptr)allocated[i] % alignment);
char *p = (char*)allocated[i];
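[Editorial note: the switch from a stack object to new/delete in TestSizeClassAllocator
follows from the new allocator's size: with kAddressSpaceSize = 2^47 and kRegionSize =
2^24 on 64-bit hosts, possible_regions_ alone holds 2^23 one-byte entries (8M), far too
large for a stack frame. A minimal sketch of the resulting usage pattern, reusing the
test's Allocator32Compact typedef and CHECK macros; an illustration only, not part of
the committed test.]

    Allocator32Compact *a = new Allocator32Compact;
    a->Init();                       // zeroes all state, including the region table
    void *x = a->Allocate(100, 1);   // 100 is rounded up to its size class
    CHECK(a->PointerIsMine(x));
    CHECK_GE(a->GetActuallyAllocatedSize(x), 100);
    a->Deallocate(x);
    a->TestOnlyUnmap();              // unmap every region the allocator created
    delete a;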