[PATCH] Add ForEachChunk() to sanitizer allocators.

Sergey Matveev earthdok at google.com
Thu Mar 14 05:25:23 PDT 2013


Probably no reason other than my lack of familiarity with C++ idioms. Is
this what you had in mind?
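
For reference, a template-and-Callable version of, say, the 64-bit allocator's
loop would look roughly like the sketch below. This is an illustration only,
reusing the loop from the patch further down; it is not the actual updated
change:

  // Sketch only: same iteration as the C-style version below, but the
  // per-chunk action is a template Callable, so it carries its own typed
  // state and there is no void *arg / reinterpret_cast round-trip.
  template <class Callable>
  void ForEachChunk(Callable &callback) {
    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
      RegionInfo *region = GetRegionInfo(class_id);
      uptr chunk_size = SizeClassMap::Size(class_id);
      uptr region_beg = kSpaceBeg + class_id * kRegionSize;
      for (uptr p = region_beg;
           p < region_beg + region->allocated_user;
           p += chunk_size) {
        callback((void *)p);
      }
    }
  }

A caller would then pass a functor that owns its state (or, once C++11 is an
option, a lambda) instead of a free function plus a void *argument.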


On Wed, Mar 13, 2013 at 7:28 PM, David Blaikie <dblaikie at gmail.com> wrote:

>
> On Mar 13, 2013 8:20 AM, "Sergey Matveev" <earthdok at google.com> wrote:
> >
> > Hi kcc, glider, samsonov,
> >
> > ForEachChunk() iterates over known chunks, passing each of them to the
> > callback.
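
The intended usage mirrors the test added below; the allocator variable and
the callback name here are just placeholders:

  // Collect every chunk the allocator reports into a set.
  void CollectChunk(void *chunk, void *arg) {
    reinterpret_cast<std::set<void *> *>(arg)->insert(chunk);
  }

  std::set<void *> chunks;
  a->ForceLock();    // ForEachChunk() expects the allocator to be locked.
  a->ForEachChunk(CollectChunk, &chunks);
  a->ForceUnlock();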
> >
> > http://llvm-reviews.chandlerc.com/D539
> >
> > Files:
> >   lib/sanitizer_common/sanitizer_allocator.h
> >   lib/sanitizer_common/tests/sanitizer_allocator_test.cc
> >
> > Index: lib/sanitizer_common/sanitizer_allocator.h
> > ===================================================================
> > --- lib/sanitizer_common/sanitizer_allocator.h
> > +++ lib/sanitizer_common/sanitizer_allocator.h
> > @@ -433,6 +433,24 @@
> >      }
> >    }
> >
> > +  // Iterate over existing chunks. May include chunks that are not currently
> > +  // allocated to the user (e.g. freed).
> > +  // The caller is expected to do a ForceLock() before calling this function.
> > +  void ForEachChunk(void (*callback)(void *p, void *arg), void *argument) {
>
> This is a rather C-ish API. Is there any reason it's done this way rather
> than more idiomatically in C++, with a template and a Callable? (The
> immediately obvious benefit is type safety; beyond that, this sort of API
> would be even more convenient under C++11, with lambdas, std::function, etc.)
>
> > +    for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
> > +      RegionInfo *region = GetRegionInfo(class_id);
> > +      uptr chunk_size = SizeClassMap::Size(class_id);
> > +      uptr region_beg = kSpaceBeg + class_id * kRegionSize;
> > +      for (uptr p = region_beg;
> > +           p < region_beg + region->allocated_user;
> > +           p += chunk_size)
> > +      {
> > +        // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p));
> > +        callback((void *)p, argument);
> > +      }
> > +    }
> > +  }
> > +
> >    typedef SizeClassMap SizeClassMapT;
> >    static const uptr kNumClasses = SizeClassMap::kNumClasses;
> >    static const uptr kNumClassesRounded = SizeClassMap::kNumClassesRounded;
> > @@ -681,6 +699,25 @@
> >      }
> >    }
> >
> > +  // Iterate over existing chunks. May include chunks that are not currently
> > +  // allocated to the user (e.g. freed).
> > +  // The caller is expected to do a ForceLock() before calling this function.
> > +  void ForEachChunk(void (*callback)(void *p, void *arg), void *argument) {
> > +    for (uptr region = 0; region < kNumPossibleRegions; region++)
> > +      if (state_->possible_regions[region]) {
> > +        uptr chunk_size = SizeClassMap::Size(state_->possible_regions[region]);
> > +        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
> > +        uptr region_beg = region * kRegionSize;
> > +        for (uptr p = region_beg;
> > +             p < region_beg + max_chunks_in_region * chunk_size;
> > +             p += chunk_size)
> > +        {
> > +          // Too slow: CHECK_EQ((void *)p, GetBlockBegin((void *)p));
> > +          callback((void *)p, argument);
> > +        }
> > +      }
> > +  }
> > +
> >    void PrintStats() {
> >    }
> >
> > @@ -1005,6 +1042,14 @@
> >      mutex_.Unlock();
> >    }
> >
> > +  // Iterate over existing chunks. May include chunks that are not currently
> > +  // allocated to the user (e.g. freed).
> > +  // The caller is expected to do a ForceLock() before calling this function.
> > +  void ForEachChunk(void (*callback)(void *p, void *arg), void *argument) {
> > +    for (uptr i = 0; i < n_chunks_; i++)
> > +      callback(GetUser(chunks_[i]), argument);
> > +  }
> > +
> >   private:
> >    static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
> >    struct Header {
> > @@ -1168,6 +1213,14 @@
> >      primary_.ForceUnlock();
> >    }
> >
> > +  // Iterate over existing chunks. May include chunks that are not currently
> > +  // allocated to the user (e.g. freed).
> > +  // The caller is expected to do a ForceLock() before calling this function.
> > +  void ForEachChunk(void (*callback)(void *p, void *arg), void *argument) {
> > +    primary_.ForEachChunk(callback, argument);
> > +    secondary_.ForEachChunk(callback, argument);
> > +  }
> > +
> >   private:
> >    PrimaryAllocator primary_;
> >    SecondaryAllocator secondary_;
> > Index: lib/sanitizer_common/tests/sanitizer_allocator_test.cc
> > ===================================================================
> > --- lib/sanitizer_common/tests/sanitizer_allocator_test.cc
> > +++ lib/sanitizer_common/tests/sanitizer_allocator_test.cc
> > @@ -22,6 +22,7 @@
> >  #include <pthread.h>
> >  #include <algorithm>
> >  #include <vector>
> > +#include <set>
> >
> >  // Too slow for debug build
> >  #if TSAN_DEBUG == 0
> > @@ -565,4 +566,88 @@
> >    }
> >  }
> >
> > +void IterationTestCallback(void *chunk, void *argument) {
> > +  std::set<void *> *chunks = reinterpret_cast<std::set<void *> *>(argument);
> > +  chunks->insert(chunk);
> > +}
> > +
> > +template <class Allocator>
> > +void TestSizeClassAllocatorIteration() {
> > +  Allocator *a = new Allocator;
> > +  a->Init();
> > +  SizeClassAllocatorLocalCache<Allocator> cache;
> > +  memset(&cache, 0, sizeof(cache));
> > +  cache.Init(0);
> > +
> > +  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
> > +    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
> > +
> > +  std::vector<void *> allocated;
> > +
> > +  // Allocate a bunch of chunks.
> > +  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
> > +    uptr size = sizes[s];
> > +    if (!a->CanAllocate(size, 1)) continue;
> > +    // printf("s = %ld\n", size);
> > +    uptr n_iter = std::max((uptr)6, 80000 / size);
> > +    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
> > +    for (uptr j = 0; j < n_iter; j++) {
> > +      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
> > +      void *x = cache.Allocate(a, class_id0);
> > +      allocated.push_back(x);
> > +    }
> > +  }
> > +
> > +  std::set<void *> reported_chunks;
> > +  a->ForceLock();
> > +  a->ForEachChunk(IterationTestCallback,
> > +                  reinterpret_cast<void *>(&reported_chunks));
> > +  a->ForceUnlock();
> > +
> > +  for (uptr i = 0; i < allocated.size(); i++) {
> > +    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
> > +    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
> > +  }
> > +
> > +  a->TestOnlyUnmap();
> > +  delete a;
> > +}
> > +
> > +#if SANITIZER_WORDSIZE == 64
> > +TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
> > +  TestSizeClassAllocatorIteration<Allocator64>();
> > +}
> > +#endif
> > +
> > +TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
> > +  TestSizeClassAllocatorIteration<Allocator32Compact>();
> > +}
> > +
> > +
> > +TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
> > +  LargeMmapAllocator<> a;
> > +  a.Init();
> > +  AllocatorStats stats;
> > +  stats.Init();
> > +
> > +  static const int kNumAllocs = 1000;
> > +  char *allocated[kNumAllocs];
> > +  static const uptr size = 40;
> > +  // Allocate some.
> > +  for (int i = 0; i < kNumAllocs; i++) {
> > +    allocated[i] = (char *)a.Allocate(&stats, size, 1);
> > +  }
> > +
> > +  std::set<void *> reported_chunks;
> > +  a.ForceLock();
> > +  a.ForEachChunk(IterationTestCallback,
> > +                  reinterpret_cast<void *>(&reported_chunks));
> > +  a.ForceUnlock();
> > +
> > +  for (uptr i = 0; i < kNumAllocs; i++) {
> > +    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
> > +    ASSERT_NE(reported_chunks.find(allocated[i]), reported_chunks.end());
> > +  }
> > +}
> > +
> >  #endif  // #if TSAN_DEBUG==0
> >
> >
>
>

