[compiler-rt] r184700 - Revert to C-style callbacks for iteration over allocator chunks.

Sergey Matveev earthdok at google.com
Mon Jun 24 01:34:51 PDT 2013


Author: smatveev
Date: Mon Jun 24 03:34:50 2013
New Revision: 184700

URL: http://llvm.org/viewvc/llvm-project?rev=184700&view=rev
Log:
Revert to C-style callbacks for iteration over allocator chunks.

Also clean up LSan code, fix some comments and replace void* with uptr
to bring down the number of reinterpret_casts.
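
For readers skimming the diff below, the heart of the change is the move from templated callables back to a plain function pointer plus an opaque void* argument. The following standalone sketch illustrates that pattern; the ToyAllocator and the Frontier typedef are stand-ins invented for illustration, not the real sanitizer/LSan types, though the ForEachChunkCallback typedef mirrors the one added to sanitizer_allocator.h in this revision.

  #include <cstdint>
  #include <cstdio>
  #include <vector>

  typedef uintptr_t uptr;

  // Callback type for iterating over chunks (mirrors sanitizer_allocator.h).
  typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);

  // Stand-in for LSan's frontier of chunk addresses.
  typedef std::vector<uptr> Frontier;

  // Toy allocator used only to demonstrate the iteration interface.
  struct ToyAllocator {
    std::vector<uptr> chunks;
    // Invokes the callback on every chunk, threading opaque state through arg.
    void ForEachChunk(ForEachChunkCallback callback, void *arg) {
      for (uptr i = 0; i < chunks.size(); i++)
        callback(chunks[i], arg);
    }
  };

  // ForEachChunk callback. Recovers typed state from the void* argument, the
  // way CollectIgnoredCb and friends do after this patch.
  static void CollectChunksCb(uptr chunk, void *arg) {
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }

  int main() {
    ToyAllocator allocator;
    allocator.chunks.push_back(0x1000);
    allocator.chunks.push_back(0x2000);
    Frontier frontier;
    // State is passed explicitly instead of being captured in a functor, so no
    // explicit template instantiations are needed in the parent tool.
    allocator.ForEachChunk(CollectChunksCb, &frontier);
    printf("collected %zu chunks\n", frontier.size());
    return 0;
  }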

Modified:
    compiler-rt/trunk/lib/asan/asan_allocator2.cc
    compiler-rt/trunk/lib/lsan/lit_tests/TestCases/disabler_in_tsd_destructor.cc
    compiler-rt/trunk/lib/lsan/lsan_allocator.cc
    compiler-rt/trunk/lib/lsan/lsan_common.cc
    compiler-rt/trunk/lib/lsan/lsan_common.h
    compiler-rt/trunk/lib/lsan/lsan_common_linux.cc
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
    compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc

Modified: compiler-rt/trunk/lib/asan/asan_allocator2.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/asan/asan_allocator2.cc?rev=184700&r1=184699&r2=184700&view=diff
==============================================================================
--- compiler-rt/trunk/lib/asan/asan_allocator2.cc (original)
+++ compiler-rt/trunk/lib/asan/asan_allocator2.cc Mon Jun 24 03:34:50 2013
@@ -718,26 +718,25 @@ void GetAllocatorGlobalRange(uptr *begin
   *end = *begin + sizeof(__asan::allocator);
 }
 
-void *PointsIntoChunk(void* p) {
+uptr PointsIntoChunk(void* p) {
   uptr addr = reinterpret_cast<uptr>(p);
   __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
   if (!m) return 0;
   uptr chunk = m->Beg();
   if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr))
-    return reinterpret_cast<void *>(chunk);
+    return chunk;
   return 0;
 }
 
-void *GetUserBegin(void *p) {
+uptr GetUserBegin(uptr chunk) {
   __asan::AsanChunk *m =
-      __asan::GetAsanChunkByAddrFastLocked(reinterpret_cast<uptr>(p));
+      __asan::GetAsanChunkByAddrFastLocked(chunk);
   CHECK(m);
-  return reinterpret_cast<void *>(m->Beg());
+  return m->Beg();
 }
 
-LsanMetadata::LsanMetadata(void *chunk) {
-  uptr addr = reinterpret_cast<uptr>(chunk);
-  metadata_ = reinterpret_cast<void *>(addr - __asan::kChunkHeaderSize);
+LsanMetadata::LsanMetadata(uptr chunk) {
+  metadata_ = reinterpret_cast<void *>(chunk - __asan::kChunkHeaderSize);
 }
 
 bool LsanMetadata::allocated() const {
@@ -765,19 +764,9 @@ u32 LsanMetadata::stack_trace_id() const
   return m->alloc_context_id;
 }
 
-template <typename Callable> void ForEachChunk(Callable const &callback) {
-  __asan::allocator.ForEachChunk(callback);
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  __asan::allocator.ForEachChunk(callback, arg);
 }
-#if CAN_SANITIZE_LEAKS
-template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>(
-    ProcessPlatformSpecificAllocationsCb const &callback);
-template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
-template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
-template void ForEachChunk<MarkIndirectlyLeakedCb>(
-    MarkIndirectlyLeakedCb const &callback);
-template void ForEachChunk<CollectIgnoredCb>(
-    CollectIgnoredCb const &callback);
-#endif  // CAN_SANITIZE_LEAKS
 
 IgnoreObjectResult IgnoreObjectLocked(const void *p) {
   uptr addr = reinterpret_cast<uptr>(p);

Modified: compiler-rt/trunk/lib/lsan/lit_tests/TestCases/disabler_in_tsd_destructor.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/lsan/lit_tests/TestCases/disabler_in_tsd_destructor.cc?rev=184700&r1=184699&r2=184700&view=diff
==============================================================================
--- compiler-rt/trunk/lib/lsan/lit_tests/TestCases/disabler_in_tsd_destructor.cc (original)
+++ compiler-rt/trunk/lib/lsan/lit_tests/TestCases/disabler_in_tsd_destructor.cc Mon Jun 24 03:34:50 2013
@@ -12,7 +12,7 @@
 
 pthread_key_t key;
 
-void key_destructor(void *) {
+void key_destructor(void *arg) {
   __lsan::ScopedDisabler d;
   void *p = malloc(1337);
   // Break optimization.

Modified: compiler-rt/trunk/lib/lsan/lsan_allocator.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/lsan/lsan_allocator.cc?rev=184700&r1=184699&r2=184700&view=diff
==============================================================================
--- compiler-rt/trunk/lib/lsan/lsan_allocator.cc (original)
+++ compiler-rt/trunk/lib/lsan/lsan_allocator.cc Mon Jun 24 03:34:50 2013
@@ -52,7 +52,7 @@ void AllocatorThreadFinish() {
 }
 
 static ChunkMetadata *Metadata(void *p) {
-  return (ChunkMetadata *)allocator.GetMetaData(p);
+  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
 }
 
 static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
@@ -62,14 +62,14 @@ static void RegisterAllocation(const Sta
   m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
   m->stack_trace_id = StackDepotPut(stack.trace, stack.size);
   m->requested_size = size;
-  atomic_store((atomic_uint8_t*)m, 1, memory_order_relaxed);
+  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
 }
 
 static void RegisterDeallocation(void *p) {
   if (!p) return;
   ChunkMetadata *m = Metadata(p);
   CHECK(m);
-  atomic_store((atomic_uint8_t*)m, 0, memory_order_relaxed);
+  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
 }
 
 void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
@@ -129,25 +129,26 @@ void GetAllocatorGlobalRange(uptr *begin
   *end = *begin + sizeof(allocator);
 }
 
-void *PointsIntoChunk(void* p) {
-  void *chunk = allocator.GetBlockBeginFastLocked(p);
+uptr PointsIntoChunk(void* p) {
+  uptr addr = reinterpret_cast<uptr>(p);
+  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
   if (!chunk) return 0;
   // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
   // valid, but we don't want that.
-  if (p < chunk) return 0;
-  ChunkMetadata *m = Metadata(chunk);
+  if (addr < chunk) return 0;
+  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
   CHECK(m);
-  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size)
+  if (m->allocated && addr < chunk + m->requested_size)
     return chunk;
   return 0;
 }
 
-void *GetUserBegin(void *p) {
-  return p;
+uptr GetUserBegin(uptr chunk) {
+  return chunk;
 }
 
-LsanMetadata::LsanMetadata(void *chunk) {
-  metadata_ = Metadata(chunk);
+LsanMetadata::LsanMetadata(uptr chunk) {
+  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
   CHECK(metadata_);
 }
 
@@ -171,20 +172,10 @@ u32 LsanMetadata::stack_trace_id() const
   return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
 }
 
-template<typename Callable>
-void ForEachChunk(Callable const &callback) {
-  allocator.ForEachChunk(callback);
+void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+  allocator.ForEachChunk(callback, arg);
 }
 
-template void ForEachChunk<ProcessPlatformSpecificAllocationsCb>(
-    ProcessPlatformSpecificAllocationsCb const &callback);
-template void ForEachChunk<PrintLeakedCb>(PrintLeakedCb const &callback);
-template void ForEachChunk<CollectLeaksCb>(CollectLeaksCb const &callback);
-template void ForEachChunk<MarkIndirectlyLeakedCb>(
-    MarkIndirectlyLeakedCb const &callback);
-template void ForEachChunk<CollectIgnoredCb>(
-    CollectIgnoredCb const &callback);
-
 IgnoreObjectResult IgnoreObjectLocked(const void *p) {
   void *chunk = allocator.GetBlockBegin(p);
   if (!chunk || p < chunk) return kIgnoreObjectInvalid;

Modified: compiler-rt/trunk/lib/lsan/lsan_common.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/lsan/lsan_common.cc?rev=184700&r1=184699&r2=184700&view=diff
==============================================================================
--- compiler-rt/trunk/lib/lsan/lsan_common.cc (original)
+++ compiler-rt/trunk/lib/lsan/lsan_common.cc Mon Jun 24 03:34:50 2013
@@ -23,7 +23,7 @@
 #if CAN_SANITIZE_LEAKS
 namespace __lsan {
 
-// This mutex is used to prevent races between DoLeakCheck and SuppressObject.
+// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
 BlockingMutex global_mutex(LINKER_INITIALIZED);
 
 THREADLOCAL int disable_counter;
@@ -84,12 +84,12 @@ static inline bool CanBeAHeapPointer(upt
 #endif
 }
 
-// Scan the memory range, looking for byte patterns that point into allocator
-// chunks. Mark those chunks with tag and add them to the frontier.
-// There are two usage modes for this function: finding reachable or ignored 
-// chunks (tag = kReachable or kIgnored) and finding indirectly leaked chunks
-// (tag = kIndirectlyLeaked). In the second case, there's no flood fill,
-// so frontier = 0.
+// Scans the memory range, looking for byte patterns that point into allocator
+// chunks. Marks those chunks with |tag| and adds them to |frontier|.
+// There are two usage modes for this function: finding reachable or ignored
+// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
+// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
+// so |frontier| = 0.
 void ScanRangeForPointers(uptr begin, uptr end,
                           Frontier *frontier,
                           const char *region_type, ChunkTag tag) {
@@ -99,10 +99,10 @@ void ScanRangeForPointers(uptr begin, up
   uptr pp = begin;
   if (pp % alignment)
     pp = pp + alignment - pp % alignment;
-  for (; pp + sizeof(void *) <= end; pp += alignment) {
+  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
     void *p = *reinterpret_cast<void**>(pp);
     if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
-    void *chunk = PointsIntoChunk(p);
+    uptr chunk = PointsIntoChunk(p);
     if (!chunk) continue;
     LsanMetadata m(chunk);
     // Reachable beats ignored beats leaked.
@@ -111,14 +111,13 @@ void ScanRangeForPointers(uptr begin, up
     m.set_tag(tag);
     if (flags()->log_pointers)
       Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
-             chunk, reinterpret_cast<uptr>(chunk) + m.requested_size(),
-             m.requested_size());
+             chunk, chunk + m.requested_size(), m.requested_size());
     if (frontier)
-      frontier->push_back(reinterpret_cast<uptr>(chunk));
+      frontier->push_back(chunk);
   }
 }
 
-// Scan thread data (stacks and TLS) for heap pointers.
+// Scans thread data (stacks and TLS) for heap pointers.
 static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                            Frontier *frontier) {
   InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
@@ -191,31 +190,34 @@ static void FloodFillTag(Frontier *front
   while (frontier->size()) {
     uptr next_chunk = frontier->back();
     frontier->pop_back();
-    LsanMetadata m(reinterpret_cast<void *>(next_chunk));
+    LsanMetadata m(next_chunk);
     ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                          "HEAP", tag);
   }
 }
 
-// Mark leaked chunks which are reachable from other leaked chunks.
-void MarkIndirectlyLeakedCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
+// which are reachable from it as indirectly leaked.
+static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (m.allocated() && m.tag() != kReachable) {
-    ScanRangeForPointers(reinterpret_cast<uptr>(p),
-                         reinterpret_cast<uptr>(p) + m.requested_size(),
+    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                          /* frontier */ 0, "HEAP", kIndirectlyLeaked);
   }
 }
 
-void CollectIgnoredCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunk callback. If chunk is marked as ignored, adds its address to
+// frontier.
+static void CollectIgnoredCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (m.allocated() && m.tag() == kIgnored)
-    frontier_->push_back(reinterpret_cast<uptr>(p));
+    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
 }
 
-// Set the appropriate tag on each chunk.
+// Sets the appropriate tag on each chunk.
 static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
   // Holds the flood fill frontier.
   Frontier frontier(GetPageSizeCached());
@@ -233,14 +235,14 @@ static void ClassifyAllChunks(SuspendedT
   if (flags()->log_pointers)
     Report("Scanning ignored chunks.\n");
   CHECK_EQ(0, frontier.size());
-  ForEachChunk(CollectIgnoredCb(&frontier));
+  ForEachChunk(CollectIgnoredCb, &frontier);
   FloodFillTag(&frontier, kIgnored);
 
   // Iterate over leaked chunks and mark those that are reachable from other
   // leaked chunks.
   if (flags()->log_pointers)
     Report("Scanning leaked chunks.\n");
-  ForEachChunk(MarkIndirectlyLeakedCb());
+  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
 }
 
 static void PrintStackTraceById(u32 stack_trace_id) {
@@ -251,9 +253,12 @@ static void PrintStackTraceById(u32 stac
                          common_flags()->strip_path_prefix, 0);
 }
 
-void CollectLeaksCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
+static void CollectLeaksCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (!m.allocated()) return;
   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
     uptr resolution = flags()->resolution;
@@ -261,33 +266,29 @@ void CollectLeaksCb::operator()(void *p)
       uptr size = 0;
       const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
       size = Min(size, resolution);
-      leak_report_->Add(StackDepotPut(trace, size), m.requested_size(),
-                        m.tag());
+      leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
     } else {
-      leak_report_->Add(m.stack_trace_id(), m.requested_size(), m.tag());
+      leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
     }
   }
 }
 
-static void CollectLeaks(LeakReport *leak_report) {
-  ForEachChunk(CollectLeaksCb(leak_report));
-}
-
-void PrintLeakedCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunkCallback. Prints addresses of unreachable chunks.
+static void PrintLeakedCb(uptr chunk, void *arg) {
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (!m.allocated()) return;
   if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
     Printf("%s leaked %zu byte object at %p.\n",
            m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
-           m.requested_size(), p);
+           m.requested_size(), chunk);
   }
 }
 
 static void PrintLeaked() {
   Printf("\n");
   Printf("Reporting individual objects:\n");
-  ForEachChunk(PrintLeakedCb());
+  ForEachChunk(PrintLeakedCb, 0 /* arg */);
 }
 
 struct DoLeakCheckParam {
@@ -302,7 +303,7 @@ static void DoLeakCheckCallback(const Su
   CHECK(!param->success);
   CHECK(param->leak_report.IsEmpty());
   ClassifyAllChunks(suspended_threads);
-  CollectLeaks(&param->leak_report);
+  ForEachChunk(CollectLeaksCb, &param->leak_report);
   if (!param->leak_report.IsEmpty() && flags()->report_objects)
     PrintLeaked();
   param->success = true;

Modified: compiler-rt/trunk/lib/lsan/lsan_common.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/lsan/lsan_common.h?rev=184700&r1=184699&r2=184700&view=diff
==============================================================================
--- compiler-rt/trunk/lib/lsan/lsan_common.h (original)
+++ compiler-rt/trunk/lib/lsan/lsan_common.h Mon Jun 24 03:34:50 2013
@@ -15,6 +15,7 @@
 #ifndef LSAN_COMMON_H
 #define LSAN_COMMON_H
 
+#include "sanitizer_common/sanitizer_allocator.h"
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_internal_defs.h"
 #include "sanitizer_common/sanitizer_platform.h"
@@ -105,55 +106,6 @@ void ScanRangeForPointers(uptr begin, up
                           Frontier *frontier,
                           const char *region_type, ChunkTag tag);
 
-// Callables for iterating over chunks. Those classes are used as template
-// parameters in ForEachChunk, so we must expose them here to allow for explicit
-// template instantiation.
-
-// Identifies unreachable chunks which must be treated as reachable. Marks them
-// as reachable and adds them to the frontier.
-class ProcessPlatformSpecificAllocationsCb {
- public:
-  explicit ProcessPlatformSpecificAllocationsCb(
-      Frontier *frontier)
-      : frontier_(frontier) {}
-  void operator()(void *p) const;
- private:
-  Frontier *frontier_;
-};
-
-// Prints addresses of unreachable chunks.
-class PrintLeakedCb {
- public:
-  void operator()(void *p) const;
-};
-
-// Aggregates unreachable chunks into a LeakReport.
-class CollectLeaksCb {
- public:
-  explicit CollectLeaksCb(LeakReport *leak_report)
-      : leak_report_(leak_report) {}
-  void operator()(void *p) const;
- private:
-  LeakReport *leak_report_;
-};
-
-// Scans each leaked chunk for pointers to other leaked chunks, and marks each
-// of them as indirectly leaked.
-class MarkIndirectlyLeakedCb {
- public:
-  void operator()(void *p) const;
-};
-
-// Finds all chunk marked as kIgnored and adds their addresses to frontier.
-class CollectIgnoredCb {
- public:
-  explicit CollectIgnoredCb(Frontier *frontier)
-      : frontier_(frontier) {}
-  void operator()(void *p) const;
- private:
-  Frontier *frontier_;
-};
-
 enum IgnoreObjectResult {
   kIgnoreObjectSuccess,
   kIgnoreObjectAlreadyIgnored,
@@ -167,8 +119,8 @@ bool DisabledInThisThread();
 
 // The following must be implemented in the parent tool.
 
-template<typename Callable> void ForEachChunk(Callable const &callback);
-// The address range occupied by the global allocator object.
+void ForEachChunk(ForEachChunkCallback callback, void *arg);
+// Returns the address range occupied by the global allocator object.
 void GetAllocatorGlobalRange(uptr *begin, uptr *end);
 // Wrappers for allocator's ForceLock()/ForceUnlock().
 void LockAllocator();
@@ -179,18 +131,18 @@ void UnlockThreadRegistry();
 bool GetThreadRangesLocked(uptr os_id, uptr *stack_begin, uptr *stack_end,
                            uptr *tls_begin, uptr *tls_end,
                            uptr *cache_begin, uptr *cache_end);
-// If p points into a chunk that has been allocated to the user, return its
-// user-visible address. Otherwise, return 0.
-void *PointsIntoChunk(void *p);
-// Return address of user-visible chunk contained in this allocator chunk.
-void *GetUserBegin(void *p);
+// If p points into a chunk that has been allocated to the user, returns its
+// user-visible address. Otherwise, returns 0.
+uptr PointsIntoChunk(void *p);
+// Returns address of user-visible chunk contained in this allocator chunk.
+uptr GetUserBegin(uptr chunk);
 // Helper for __lsan_ignore_object().
 IgnoreObjectResult IgnoreObjectLocked(const void *p);
 // Wrapper for chunk metadata operations.
 class LsanMetadata {
  public:
-  // Constructor accepts pointer to user-visible chunk.
-  explicit LsanMetadata(void *chunk);
+  // Constructor accepts address of user-visible chunk.
+  explicit LsanMetadata(uptr chunk);
   bool allocated() const;
   ChunkTag tag() const;
   void set_tag(ChunkTag value);

Modified: compiler-rt/trunk/lib/lsan/lsan_common_linux.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/lsan/lsan_common_linux.cc?rev=184700&r1=184699&r2=184700&view=diff
==============================================================================
--- compiler-rt/trunk/lib/lsan/lsan_common_linux.cc (original)
+++ compiler-rt/trunk/lib/lsan/lsan_common_linux.cc Mon Jun 24 03:34:50 2013
@@ -53,8 +53,7 @@ void InitializePlatformSpecificModules()
 
 static int ProcessGlobalRegionsCallback(struct dl_phdr_info *info, size_t size,
                                         void *data) {
-  Frontier *frontier =
-      reinterpret_cast<Frontier *>(data);
+  Frontier *frontier = reinterpret_cast<Frontier *>(data);
   for (uptr j = 0; j < info->dlpi_phnum; j++) {
     const ElfW(Phdr) *phdr = &(info->dlpi_phdr[j]);
     // We're looking for .data and .bss sections, which reside in writeable,
@@ -82,7 +81,7 @@ static int ProcessGlobalRegionsCallback(
   return 0;
 }
 
-// Scan global variables for heap pointers.
+// Scans global variables for heap pointers.
 void ProcessGlobalRegions(Frontier *frontier) {
   // FIXME: dl_iterate_phdr acquires a linker lock, so we run a risk of
   // deadlocking by running this under StopTheWorld. However, the lock is
@@ -101,23 +100,26 @@ static uptr GetCallerPC(u32 stack_id) {
   return 0;
 }
 
-void ProcessPlatformSpecificAllocationsCb::operator()(void *p) const {
-  p = GetUserBegin(p);
-  LsanMetadata m(p);
+// ForEachChunk callback. Identifies unreachable chunks which must be treated as
+// reachable. Marks them as reachable and adds them to the frontier.
+static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
+  CHECK(arg);
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
   if (m.allocated() && m.tag() != kReachable) {
     if (linker->containsAddress(GetCallerPC(m.stack_trace_id()))) {
       m.set_tag(kReachable);
-      frontier_->push_back(reinterpret_cast<uptr>(p));
+      reinterpret_cast<Frontier *>(arg)->push_back(chunk);
     }
   }
 }
 
-// Handle dynamically allocated TLS blocks by treating all chunks allocated from
-// ld-linux.so as reachable.
+// Handles dynamically allocated TLS blocks by treating all chunks allocated
+// from ld-linux.so as reachable.
 void ProcessPlatformSpecificAllocations(Frontier *frontier) {
   if (!flags()->use_tls) return;
   if (!linker) return;
-  ForEachChunk(ProcessPlatformSpecificAllocationsCb(frontier));
+  ForEachChunk(ProcessPlatformSpecificAllocationsCb, frontier);
 }
 
 }  // namespace __lsan

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h?rev=184700&r1=184699&r2=184700&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h Mon Jun 24 03:34:50 2013
@@ -279,6 +279,9 @@ struct NoOpMapUnmapCallback {
   void OnUnmap(uptr p, uptr size) const { }
 };
 
+// Callback type for iterating over chunks.
+typedef void (*ForEachChunkCallback)(uptr chunk, void *arg);
+
 // SizeClassAllocator64 -- allocator for 64-bit address space.
 //
 // Space: a portion of address space of kSpaceSize bytes starting at
@@ -433,20 +436,18 @@ class SizeClassAllocator64 {
     }
   }
 
-  // Iterate over existing chunks. May include chunks that are not currently
-  // allocated to the user (e.g. freed).
-  // The caller is expected to call ForceLock() before calling this function.
-  template<typename Callable>
-  void ForEachChunk(const Callable &callback) {
+  // Iterate over all existing chunks.
+  // The allocator must be locked when calling this function.
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
     for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
       RegionInfo *region = GetRegionInfo(class_id);
       uptr chunk_size = SizeClassMap::Size(class_id);
       uptr region_beg = kSpaceBeg + class_id * kRegionSize;
-      for (uptr p = region_beg;
-           p < region_beg + region->allocated_user;
-           p += chunk_size) {
-        // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p));
-        callback((void *)p);
+      for (uptr chunk = region_beg;
+           chunk < region_beg + region->allocated_user;
+           chunk += chunk_size) {
+        // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+        callback(chunk, arg);
       }
     }
   }
@@ -726,21 +727,19 @@ class SizeClassAllocator32 {
     }
   }
 
-  // Iterate over existing chunks. May include chunks that are not currently
-  // allocated to the user (e.g. freed).
-  // The caller is expected to call ForceLock() before calling this function.
-  template<typename Callable>
-  void ForEachChunk(const Callable &callback) {
+  // Iterate over all existing chunks.
+  // The allocator must be locked when calling this function.
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
     for (uptr region = 0; region < kNumPossibleRegions; region++)
       if (possible_regions[region]) {
         uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
         uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
         uptr region_beg = region * kRegionSize;
-        for (uptr p = region_beg;
-             p < region_beg + max_chunks_in_region * chunk_size;
-             p += chunk_size) {
-          // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p));
-          callback((void *)p);
+        for (uptr chunk = region_beg;
+             chunk < region_beg + max_chunks_in_region * chunk_size;
+             chunk += chunk_size) {
+          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
+          callback(chunk, arg);
         }
       }
   }
@@ -1108,13 +1107,11 @@ class LargeMmapAllocator {
     mutex_.Unlock();
   }
 
-  // Iterate over existing chunks. May include chunks that are not currently
-  // allocated to the user (e.g. freed).
-  // The caller is expected to call ForceLock() before calling this function.
-  template<typename Callable>
-  void ForEachChunk(const Callable &callback) {
+  // Iterate over all existing chunks.
+  // The allocator must be locked when calling this function.
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
     for (uptr i = 0; i < n_chunks_; i++)
-      callback(GetUser(chunks_[i]));
+      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
   }
 
  private:
@@ -1290,13 +1287,11 @@ class CombinedAllocator {
     primary_.ForceUnlock();
   }
 
-  // Iterate over existing chunks. May include chunks that are not currently
-  // allocated to the user (e.g. freed).
-  // The caller is expected to call ForceLock() before calling this function.
-  template<typename Callable>
-  void ForEachChunk(const Callable &callback) {
-    primary_.ForEachChunk(callback);
-    secondary_.ForEachChunk(callback);
+  // Iterate over all existing chunks.
+  // The allocator must be locked when calling this function.
+  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
+    primary_.ForEachChunk(callback, arg);
+    secondary_.ForEachChunk(callback, arg);
   }
 
  private:

Modified: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc?rev=184700&r1=184699&r2=184700&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc Mon Jun 24 03:34:50 2013
@@ -635,15 +635,8 @@ TEST(Allocator, ScopedBuffer) {
   }
 }
 
-class IterationTestCallback {
- public:
-  explicit IterationTestCallback(std::set<void *> *chunks)
-    : chunks_(chunks) {}
-  void operator()(void *chunk) const {
-    chunks_->insert(chunk);
-  }
- private:
-  std::set<void *> *chunks_;
+void IterationTestCallback(uptr chunk, void *arg) {
+  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
 };
 
 template <class Allocator>
@@ -673,15 +666,15 @@ void TestSizeClassAllocatorIteration() {
     }
   }
 
-  std::set<void *> reported_chunks;
-  IterationTestCallback callback(&reported_chunks);
+  std::set<uptr> reported_chunks;
   a->ForceLock();
-  a->ForEachChunk(callback);
+  a->ForEachChunk(IterationTestCallback, &reported_chunks);
   a->ForceUnlock();
 
   for (uptr i = 0; i < allocated.size(); i++) {
     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
-    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
+    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
+              reported_chunks.end());
   }
 
   a->TestOnlyUnmap();
@@ -711,15 +704,15 @@ TEST(SanitizerCommon, LargeMmapAllocator
   for (uptr i = 0; i < kNumAllocs; i++)
     allocated[i] = (char *)a.Allocate(&stats, size, 1);
 
-  std::set<void *> reported_chunks;
-  IterationTestCallback callback(&reported_chunks);
+  std::set<uptr> reported_chunks;
   a.ForceLock();
-  a.ForEachChunk(callback);
+  a.ForEachChunk(IterationTestCallback, &reported_chunks);
   a.ForceUnlock();
 
   for (uptr i = 0; i < kNumAllocs; i++) {
     // Don't use EXPECT_NE. Reporting the first mismatch is enough.
-    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
+    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
+              reported_chunks.end());
   }
   for (uptr i = 0; i < kNumAllocs; i++)
     a.Deallocate(&stats, allocated[i]);




