[llvm-commits] [compiler-rt] r171161 - in /compiler-rt/trunk/lib: asan/asan_allocator.cc asan/asan_allocator.h asan/asan_allocator2.cc asan/asan_stats.cc msan/msan.cc msan/msan_report.cc sanitizer_common/sanitizer_allocator.h
Kostya Serebryany
kcc at google.com
Thu Dec 27 06:09:20 PST 2012
Author: kcc
Date: Thu Dec 27 08:09:19 2012
New Revision: 171161
URL: http://llvm.org/viewvc/llvm-project?rev=171161&view=rev
Log:
[sanitizer] add statistics to the allocator; fix lint
Modified:
compiler-rt/trunk/lib/asan/asan_allocator.cc
compiler-rt/trunk/lib/asan/asan_allocator.h
compiler-rt/trunk/lib/asan/asan_allocator2.cc
compiler-rt/trunk/lib/asan/asan_stats.cc
compiler-rt/trunk/lib/msan/msan.cc
compiler-rt/trunk/lib/msan/msan_report.cc
compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
Modified: compiler-rt/trunk/lib/asan/asan_allocator.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/asan/asan_allocator.cc?rev=171161&r1=171160&r2=171161&view=diff
==============================================================================
--- compiler-rt/trunk/lib/asan/asan_allocator.cc (original)
+++ compiler-rt/trunk/lib/asan/asan_allocator.cc Thu Dec 27 08:09:19 2012
@@ -687,6 +687,9 @@
namespace __asan {
+void PrintInternalAllocatorStats() {
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
AllocType alloc_type) {
Modified: compiler-rt/trunk/lib/asan/asan_allocator.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/asan/asan_allocator.h?rev=171161&r1=171160&r2=171161&view=diff
==============================================================================
--- compiler-rt/trunk/lib/asan/asan_allocator.h (original)
+++ compiler-rt/trunk/lib/asan/asan_allocator.h Thu Dec 27 08:09:19 2012
@@ -105,8 +105,9 @@
}
AsanChunkFifoList quarantine_;
+#if ASAN_ALLOCATOR_VERSION == 1
AsanChunk *free_lists_[kNumberOfSizeClasses];
-#if ASAN_ALLOCATOR_VERSION == 2
+#else
uptr allocator2_cache[1024]; // Opaque.
#endif
void CommitBack();
@@ -214,6 +215,8 @@
void asan_mz_force_lock();
void asan_mz_force_unlock();
+void PrintInternalAllocatorStats();
+
// Log2 and RoundUpToPowerOfTwo should be inlined for performance.
#if defined(_WIN32) && !defined(__clang__)
extern "C" {
Modified: compiler-rt/trunk/lib/asan/asan_allocator2.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/asan/asan_allocator2.cc?rev=171161&r1=171160&r2=171161&view=diff
==============================================================================
--- compiler-rt/trunk/lib/asan/asan_allocator2.cc (original)
+++ compiler-rt/trunk/lib/asan/asan_allocator2.cc Thu Dec 27 08:09:19 2012
@@ -590,6 +590,10 @@
allocator.SwallowCache(GetAllocatorCache(this));
}
+void PrintInternalAllocatorStats() {
+ allocator.PrintStats();
+}
+
SANITIZER_INTERFACE_ATTRIBUTE
void *asan_memalign(uptr alignment, uptr size, StackTrace *stack,
AllocType alloc_type) {
Modified: compiler-rt/trunk/lib/asan/asan_stats.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/asan/asan_stats.cc?rev=171161&r1=171160&r2=171161&view=diff
==============================================================================
--- compiler-rt/trunk/lib/asan/asan_stats.cc (original)
+++ compiler-rt/trunk/lib/asan/asan_stats.cc Thu Dec 27 08:09:19 2012
@@ -66,6 +66,7 @@
StackDepotStats *stack_depot_stats = StackDepotGetStats();
Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
stack_depot_stats->n_uniq_ids, stack_depot_stats->mapped >> 20);
+ PrintInternalAllocatorStats();
}
} // namespace __asan
Modified: compiler-rt/trunk/lib/msan/msan.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/msan.cc?rev=171161&r1=171160&r2=171161&view=diff
==============================================================================
--- compiler-rt/trunk/lib/msan/msan.cc (original)
+++ compiler-rt/trunk/lib/msan/msan.cc Thu Dec 27 08:09:19 2012
@@ -228,7 +228,8 @@
Printf("FATAL: MemorySanitizer can not mmap the shadow memory.\n");
Printf("FATAL: Make sure to compile with -fPIE and to link with -pie.\n");
Printf("FATAL: Disabling ASLR is known to cause this error.\n");
- Printf("FATAL: If running under GDB, try 'set disable-randomization off'.\n");
+ Printf("FATAL: If running under GDB, try "
+ "'set disable-randomization off'.\n");
DumpProcessMap();
Die();
}
Modified: compiler-rt/trunk/lib/msan/msan_report.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/msan_report.cc?rev=171161&r1=171160&r2=171161&view=diff
==============================================================================
--- compiler-rt/trunk/lib/msan/msan_report.cc (original)
+++ compiler-rt/trunk/lib/msan/msan_report.cc Thu Dec 27 08:09:19 2012
@@ -1,4 +1,4 @@
-//===-- msan_report.cc -----------------------------------------------------===//
+//===-- msan_report.cc ----------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h?rev=171161&r1=171160&r2=171161&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h Thu Dec 27 08:09:19 2012
@@ -166,12 +166,14 @@
// Move at most max_count chunks from allocate_from to allocate_to.
// This function is better be a method of AllocatorFreeList, but we can't
// inherit it from IntrusiveList as the ancient gcc complains about non-PODness.
-static inline void BulkMove(uptr max_count,
+static inline uptr BulkMove(uptr max_count,
AllocatorFreeList *allocate_from,
AllocatorFreeList *allocate_to) {
CHECK(!allocate_from->empty());
CHECK(allocate_to->empty());
+ uptr res = 0;
if (allocate_from->size() <= max_count) {
+ res = allocate_from->size();
allocate_to->append_front(allocate_from);
CHECK(allocate_from->empty());
} else {
@@ -180,9 +182,11 @@
allocate_from->pop_front();
allocate_to->push_front(node);
}
+ res = max_count;
CHECK(!allocate_from->empty());
}
CHECK(!allocate_to->empty());
+ return res;
}
// Allocators call these callbacks on mmap/munmap.
@@ -252,7 +256,8 @@
if (region->free_list.empty()) {
PopulateFreeList(class_id, region);
}
- BulkMove(SizeClassMap::MaxCached(class_id), &region->free_list, free_list);
+ region->n_allocated += BulkMove(SizeClassMap::MaxCached(class_id),
+ &region->free_list, free_list);
}
// Swallow the entire free_list for the given class_id.
@@ -260,6 +265,7 @@
CHECK_LT(class_id, kNumClasses);
RegionInfo *region = GetRegionInfo(class_id);
SpinMutexLock l(&region->mutex);
+ region->n_freed += free_list->size();
region->free_list.append_front(free_list);
}
@@ -311,6 +317,31 @@
UnmapWithCallback(kSpaceBeg, kSpaceSize + AdditionalSize());
}
+ void PrintStats() {
+ uptr total_mapped = 0;
+ uptr n_allocated = 0;
+ uptr n_freed = 0;
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ total_mapped += region->mapped_user;
+ n_allocated += region->n_allocated;
+ n_freed += region->n_freed;
+ }
+ Printf("Stats: SizeClassAllocator64: %zdM mapped in %zd allocations; "
+ "remains %zd\n",
+ total_mapped >> 20, n_allocated, n_allocated - n_freed);
+ for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ if (region->mapped_user == 0) continue;
+ Printf(" %02zd (%zd): total: %zd K allocs: %zd remains: %zd\n",
+ class_id,
+ SizeClassMap::Size(class_id),
+ region->mapped_user >> 10,
+ region->n_allocated,
+ region->n_allocated - region->n_freed);
+ }
+ }
+
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
@@ -336,14 +367,13 @@
uptr allocated_meta; // Bytes allocated for metadata.
uptr mapped_user; // Bytes mapped for user memory.
uptr mapped_meta; // Bytes mapped for metadata.
+ uptr n_allocated, n_freed; // Just stats.
};
COMPILER_CHECK(sizeof(RegionInfo) >= kCacheLineSize);
static uptr AdditionalSize() {
- uptr PageSize = GetPageSizeCached();
- uptr res = Max(sizeof(RegionInfo) * kNumClassesRounded, PageSize);
- CHECK_EQ(res % PageSize, 0);
- return res;
+ return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
+ GetPageSizeCached());
}
RegionInfo *GetRegionInfo(uptr class_id) {
@@ -415,6 +445,7 @@
CHECK(!region->free_list.empty());
AllocatorListNode *node = region->free_list.front();
region->free_list.pop_front();
+ region->n_allocated++;
return reinterpret_cast<void*>(node);
}
@@ -422,6 +453,7 @@
RegionInfo *region = GetRegionInfo(class_id);
SpinMutexLock l(&region->mutex);
region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+ region->n_freed++;
}
};
@@ -550,6 +582,9 @@
UnmapWithCallback(reinterpret_cast<uptr>(state_), sizeof(State));
}
+ void PrintStats() {
+ }
+
typedef SizeClassMap SizeClassMapT;
static const uptr kNumClasses = SizeClassMap::kNumClasses;
@@ -718,6 +753,9 @@
CHECK_LT(idx, kMaxNumChunks);
h->chunk_idx = idx;
chunks_[idx] = h;
+ stats.n_allocs++;
+ stats.currently_allocated += map_size;
+ stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
}
return reinterpret_cast<void*>(res);
}
@@ -732,6 +770,8 @@
chunks_[idx] = chunks_[n_chunks_ - 1];
chunks_[idx]->chunk_idx = idx;
n_chunks_--;
+ stats.n_frees++;
+ stats.currently_allocated -= h->map_size;
}
MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
@@ -785,6 +825,13 @@
return GetUser(h);
}
+ void PrintStats() {
+ Printf("Stats: LargeMmapAllocator: allocated %zd times, "
+ "remains %zd (%zd K) max %zd M\n",
+ stats.n_allocs, stats.n_allocs - stats.n_frees,
+ stats.currently_allocated >> 10, stats.max_allocated >> 20);
+ }
+
private:
static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
struct Header {
@@ -812,6 +859,9 @@
uptr page_size_;
Header *chunks_[kMaxNumChunks];
uptr n_chunks_;
+ struct Stats {
+ uptr n_allocs, n_frees, currently_allocated, max_allocated;
+ } stats;
SpinMutex mutex_;
};
@@ -919,6 +969,11 @@
cache->Drain(&primary_);
}
+ void PrintStats() {
+ primary_.PrintStats();
+ secondary_.PrintStats();
+ }
+
private:
PrimaryAllocator primary_;
SecondaryAllocator secondary_;
More information about the llvm-commits
mailing list