[llvm-commits] [compiler-rt] r171099 - in /compiler-rt/trunk/lib: asan/asan_allocator2.cc asan/asan_flags.h asan/asan_rtl.cc asan/asan_stats.cc sanitizer_common/sanitizer_stackdepot.cc sanitizer_common/sanitizer_stackdepot.h

Kostya Serebryany kcc at google.com
Tue Dec 25 22:30:03 PST 2012


Author: kcc
Date: Wed Dec 26 00:30:02 2012
New Revision: 171099

URL: http://llvm.org/viewvc/llvm-project?rev=171099&view=rev
Log:
[asan] asan_allocator2: by default use the StackDepot to store the stack traces instead of storing them in the redzones

Modified:
    compiler-rt/trunk/lib/asan/asan_allocator2.cc
    compiler-rt/trunk/lib/asan/asan_flags.h
    compiler-rt/trunk/lib/asan/asan_rtl.cc
    compiler-rt/trunk/lib/asan/asan_stats.cc
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.h

Modified: compiler-rt/trunk/lib/asan/asan_allocator2.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/asan/asan_allocator2.cc?rev=171099&r1=171098&r2=171099&view=diff
==============================================================================
--- compiler-rt/trunk/lib/asan/asan_allocator2.cc (original)
+++ compiler-rt/trunk/lib/asan/asan_allocator2.cc Wed Dec 26 00:30:02 2012
@@ -26,6 +26,7 @@
 #include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_list.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
 
 namespace __asan {
 
@@ -162,9 +163,8 @@
   }
   void *AllocBeg() {
     if (from_memalign)
-      return reinterpret_cast<uptr>(
-          allocator.GetBlockBegin(reinterpret_cast<void *>(this)));
-    return Beg() - ComputeRZSize(0);
+      return allocator.GetBlockBegin(reinterpret_cast<void *>(this));
+    return reinterpret_cast<void*>(Beg() - ComputeRZSize(0));
   }
   // We store the alloc/free stack traces in the chunk itself.
   u32 *AllocStackBeg() {
@@ -189,14 +189,29 @@
 uptr AsanChunkView::AllocTid() { return chunk_->alloc_tid; }
 uptr AsanChunkView::FreeTid() { return chunk_->free_tid; }
 
+static void GetStackTraceFromId(u32 id, StackTrace *stack) {
+  CHECK(id);
+  uptr size = 0;
+  const uptr *trace = StackDepotGet(id, &size);
+  CHECK_LT(size, kStackTraceMax);
+  internal_memcpy(stack->trace, trace, sizeof(uptr) * size);
+  stack->size = size;
+}
+
 void AsanChunkView::GetAllocStack(StackTrace *stack) {
-  StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
-                              chunk_->AllocStackSize());
+  if (flags()->use_stack_depot)
+    GetStackTraceFromId(chunk_->alloc_context_id, stack);
+  else
+    StackTrace::UncompressStack(stack, chunk_->AllocStackBeg(),
+                                chunk_->AllocStackSize());
 }
 
 void AsanChunkView::GetFreeStack(StackTrace *stack) {
-  StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
-                              chunk_->FreeStackSize());
+  if (flags()->use_stack_depot)
+    GetStackTraceFromId(chunk_->free_context_id, stack);
+  else
+    StackTrace::UncompressStack(stack, chunk_->FreeStackBeg(),
+                                chunk_->FreeStackSize());
 }
 
 class Quarantine: public AsanChunkFifoList {
@@ -341,7 +356,13 @@
     m->user_requested_size = SizeClassMap::kMaxSize;
     *reinterpret_cast<uptr *>(allocator.GetMetaData(allocated)) = size;
   }
-  StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
+
+  if (flags()->use_stack_depot) {
+    m->alloc_context_id = StackDepotPut(stack->trace, stack->size);
+  } else {
+    m->alloc_context_id = 0;
+    StackTrace::CompressStack(stack, m->AllocStackBeg(), m->AllocStackSize());
+  }
 
   uptr size_rounded_down_to_granularity = RoundDownTo(size, SHADOW_GRANULARITY);
   // Unpoison the bulk of the memory region.
@@ -391,7 +412,12 @@
     CHECK_EQ(m->free_tid, kInvalidTid);
   AsanThread *t = asanThreadRegistry().GetCurrent();
   m->free_tid = t ? t->tid() : 0;
-  StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
+  if (flags()->use_stack_depot) {
+    m->free_context_id = StackDepotPut(stack->trace, stack->size);
+  } else {
+    m->free_context_id = 0;
+    StackTrace::CompressStack(stack, m->FreeStackBeg(), m->FreeStackSize());
+  }
   CHECK(m->chunk_state == CHUNK_QUARANTINE);
   // Poison the region.
   PoisonShadow(m->Beg(),

Modified: compiler-rt/trunk/lib/asan/asan_flags.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/asan/asan_flags.h?rev=171099&r1=171098&r2=171099&view=diff
==============================================================================
--- compiler-rt/trunk/lib/asan/asan_flags.h (original)
+++ compiler-rt/trunk/lib/asan/asan_flags.h Wed Dec 26 00:30:02 2012
@@ -104,6 +104,8 @@
   bool poison_heap;
   // Report errors on malloc/delete, new/free, new/delete[], etc.
   bool alloc_dealloc_mismatch;
+  // Use stack depot instead of storing stacks in the redzones.
+  bool use_stack_depot;
 };
 
 Flags *flags();

Modified: compiler-rt/trunk/lib/asan/asan_rtl.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/asan/asan_rtl.cc?rev=171099&r1=171098&r2=171099&view=diff
==============================================================================
--- compiler-rt/trunk/lib/asan/asan_rtl.cc (original)
+++ compiler-rt/trunk/lib/asan/asan_rtl.cc Wed Dec 26 00:30:02 2012
@@ -108,6 +108,7 @@
   ParseFlag(str, &f->fast_unwind_on_malloc, "fast_unwind_on_malloc");
   ParseFlag(str, &f->poison_heap, "poison_heap");
   ParseFlag(str, &f->alloc_dealloc_mismatch, "alloc_dealloc_mismatch");
+  ParseFlag(str, &f->use_stack_depot, "use_stack_depot");
 }
 
 void InitializeFlags(Flags *f, const char *env) {
@@ -145,6 +146,7 @@
   f->fast_unwind_on_malloc = true;
   f->poison_heap = true;
   f->alloc_dealloc_mismatch = false;
+  f->use_stack_depot = true;  // Only affects allocator2.
 
   // Override from user-specified string.
   ParseFlagsFromString(f, MaybeCallAsanDefaultOptions());

Modified: compiler-rt/trunk/lib/asan/asan_stats.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/asan/asan_stats.cc?rev=171099&r1=171098&r2=171099&view=diff
==============================================================================
--- compiler-rt/trunk/lib/asan/asan_stats.cc (original)
+++ compiler-rt/trunk/lib/asan/asan_stats.cc Wed Dec 26 00:30:02 2012
@@ -17,6 +17,7 @@
 #include "asan_stats.h"
 #include "asan_thread_registry.h"
 #include "sanitizer/asan_interface.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
 
 namespace __asan {
 
@@ -62,6 +63,9 @@
   // Use lock to keep reports from mixing up.
   ScopedLock lock(&print_lock);
   stats.Print();
+  StackDepotStats *stack_depot_stats = StackDepotGetStats();
+  Printf("Stats: StackDepot: %zd ids; %zdM mapped\n",
+         stack_depot_stats->n_uniq_ids, stack_depot_stats->mapped >> 20);
 }
 
 }  // namespace __asan

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc?rev=171099&r1=171098&r2=171099&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.cc Wed Dec 26 00:30:02 2012
@@ -42,6 +42,12 @@
   atomic_uint32_t seq[kPartCount];  // Unique id generators.
 } depot;
 
+static StackDepotStats stats;
+
+StackDepotStats *StackDepotGetStats() {
+  return &stats;
+}
+
 static u32 hash(const uptr *stack, uptr size) {
   // murmur2
   const u32 m = 0x5bd1e995;
@@ -77,7 +83,7 @@
 }
 
 static StackDesc *allocDesc(uptr size) {
-  // Frist, try to allocate optimisitically.
+  // First, try to allocate optimistically.
   uptr memsz = sizeof(StackDesc) + (size - 1) * sizeof(uptr);
   StackDesc *s = tryallocDesc(memsz);
   if (s)
@@ -93,6 +99,7 @@
     if (allocsz < memsz)
       allocsz = memsz;
     uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
+    stats.mapped += allocsz;
     atomic_store(&depot.region_end, mem + allocsz, memory_order_release);
     atomic_store(&depot.region_pos, mem, memory_order_release);
   }
@@ -156,6 +163,7 @@
   }
   uptr part = (h % kTabSize) / kPartSize;
   id = atomic_fetch_add(&depot.seq[part], 1, memory_order_relaxed) + 1;
+  stats.n_uniq_ids++;
   CHECK_LT(id, kMaxId);
   id |= part << kPartShift;
   CHECK_NE(id, 0);

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.h?rev=171099&r1=171098&r2=171099&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_stackdepot.h Wed Dec 26 00:30:02 2012
@@ -24,6 +24,13 @@
 // Retrieves a stored stack trace by the id.
 const uptr *StackDepotGet(u32 id, uptr *size);
 
+struct StackDepotStats {
+  uptr n_uniq_ids;
+  uptr mapped;
+};
+
+StackDepotStats *StackDepotGetStats();
+
 }  // namespace __sanitizer
 
 #endif  // SANITIZER_STACKDEPOT_H





More information about the llvm-commits mailing list