[llvm-commits] [compiler-rt] r158991 - in /compiler-rt/trunk/lib/sanitizer_common: sanitizer_allocator64.h sanitizer_common.h tests/sanitizer_allocator64_test.cc

Kostya Serebryany kcc at google.com
Mon Jun 25 07:45:02 PDT 2012


On Mon, Jun 25, 2012 at 5:40 PM, Dmitry Vyukov <dvyukov at google.com> wrote:

> On Fri, Jun 22, 2012 at 7:11 PM, Dmitry Vyukov <dvyukov at google.com> wrote:
>
>> On Fri, Jun 22, 2012 at 5:00 PM, Kostya Serebryany <kcc at google.com>wrote:
>>
>>> Author: kcc
>>> Date: Fri Jun 22 08:00:50 2012
>>> New Revision: 158991
>>>
>>> URL: http://llvm.org/viewvc/llvm-project?rev=158991&view=rev
>>> Log:
>>> [tsan] more code for a specialized tsan allocator
>>>
>>> Modified:
>>>    compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h
>>>    compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h
>>>
>>>  compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
>>>
>>> Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h
>>> URL:
>>> http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h?rev=158991&r1=158990&r2=158991&view=diff
>>>
>>> ==============================================================================
>>> --- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h (original)
>>> +++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h Fri Jun 22 08:00:50 2012
>>> @@ -22,7 +22,7 @@
>>>
>>>  namespace __sanitizer {
>>>
>>> -// Maps size class to size and back.
>>> +// Maps size class id to size and back.
>>>  class DefaultSizeClassMap {
>>>  private:
>>>   // Here we use a spline composed of 5 polynomials of order 1.
>>> @@ -53,19 +53,20 @@
>>>  public:
>>>   static const uptr kNumClasses = u4 + 1;
>>>   static const uptr kMaxSize = l5;
>>> +  static const uptr kMinSize = l0;
>>>
>>>   COMPILER_CHECK(kNumClasses <= 256);
>>>   COMPILER_CHECK((kMaxSize & (kMaxSize - 1)) == 0);
>>>
>>> -  static uptr Size(uptr size_class) {
>>> -    if (size_class <= u0) return l0 + s0 * (size_class - 0);
>>> -    if (size_class <= u1) return l1 + s1 * (size_class - u0);
>>> -    if (size_class <= u2) return l2 + s2 * (size_class - u1);
>>> -    if (size_class <= u3) return l3 + s3 * (size_class - u2);
>>> -    if (size_class <= u4) return l4 + s4 * (size_class - u3);
>>> +  static uptr Size(uptr class_id) {
>>> +    if (class_id <= u0) return l0 + s0 * (class_id - 0);
>>> +    if (class_id <= u1) return l1 + s1 * (class_id - u0);
>>> +    if (class_id <= u2) return l2 + s2 * (class_id - u1);
>>> +    if (class_id <= u3) return l3 + s3 * (class_id - u2);
>>> +    if (class_id <= u4) return l4 + s4 * (class_id - u3);
>>>     return 0;
>>>   }
>>> -  static uptr Class(uptr size) {
>>> +  static uptr ClassID(uptr size) {
>>>     if (size <= l1) return 0  + (size - l0 + s0 - 1) / s0;
>>>     if (size <= l2) return u0 + (size - l1 + s1 - 1) / s1;
>>>     if (size <= l3) return u1 + (size - l2 + s2 - 1) / s2;
>>> @@ -75,6 +76,129 @@
>>>   }
>>>  };
>>>
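
For reference, the property this map provides (and which the updated test below exercises) is that Size() and ClassID() round-trip. A minimal standalone sketch, using only the two public functions above:

  typedef DefaultSizeClassMap SCMap;
  for (uptr c = 0; c < SCMap::kNumClasses; c++)
    CHECK_EQ(SCMap::ClassID(SCMap::Size(c)), c);  // class id -> size -> same id
  for (uptr s = 1; s <= SCMap::kMaxSize; s++)
    CHECK_GE(SCMap::Size(SCMap::ClassID(s)), s);  // every size fits in its class
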
>>> +// Space: a portion of address space of kSpaceSize bytes starting at
>>> +// a fixed address (kSpaceBeg). Both constants are powers of two and
>>> +// kSpaceBeg is kSpaceSize-aligned.
>>> +//
>>> +// Region: a part of Space dedicated to a single size class.
>>> +// There are kNumClasses Regions of equal size.
>>> +//
>>> +// UserChunk: a piece of memory returned to user.
>>> +// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
>>> +//
>>> +// A Region looks like this:
>>> +// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
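
To make the layout concrete, here is how a user pointer would map to its metadata under this scheme (illustration only; the names region_beg, chunk_idx and meta are not part of the patch):

  // For a chunk p of class class_id:
  //   region_beg = kSpaceBeg + kRegionSize * class_id;
  //   chunk_idx  = (p - region_beg) / SizeClassMap::Size(class_id);
  //   meta       = region_beg + kRegionSize - (chunk_idx + 1) * kMetadataSize;
  // i.e. MetaChunk1 sits at the very end of the Region and the metadata
  // array grows downwards toward the user chunks.
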
>>> +template <const uptr kSpaceBeg, const uptr kSpaceSize,
>>> +          const uptr kMetadataSize, class SizeClassMap>
>>> +class SizeClassAllocator64 {
>>> + public:
>>> +  void Init() {
>>> +    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
>>> +             AllocBeg(), AllocSize())));
>>> +  }
>>> +  void *Allocate(uptr size) {
>>> +    CHECK_LE(size, SizeClassMap::kMaxSize);
>>> +    return AllocateBySizeClass(SizeClassMap::ClassID(size));
>>> +  }
>>> +  void Deallocate(void *p) {
>>>
>>
>> It needs to ensure that PointerIsMine(p).
>>
>>
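A minimal sketch of the suggested guard (Deallocate() and CHECK are from the patch; only the added line is new):

  void Deallocate(void *p) {
    CHECK(PointerIsMine(p));  // reject pointers outside the Space
    DeallocateBySizeClass(p, GetSizeClass(p));
  }
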
>>> +    DeallocateBySizeClass(p, GetSizeClass(p));
>>> +  }
>>> +  bool PointerIsMine(void *p) {
>>> +    return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
>>> +  }
>>> +  uptr GetSizeClass(void *p) {
>>> +    return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
>>> +  }
>>> +
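
Concretely, with the parameters used in the test below (kSpaceBeg = 0x600000000000, kSpaceSize = 2^40 and 256 classes, so kRegionSize = 4 GB), a pointer at kSpaceBeg + 5 * kRegionSize + 64 satisfies PointerIsMine() and GetSizeClass() returns 5.
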
>>> +  uptr TotalMemoryUsedIncludingFreeLists() {
>>>
>>
>> Can we express the public name in public terms? It makes no sense to me
>> as a user. Free lists are no more than an implementation detail, so one
>> day we may not have them at all.
>>
>>
>>> +    uptr res = 0;
>>> +    for (uptr i = 0; i < kNumClasses; i++)
>>> +      res += GetRegionInfo(i)->allocated;
>>> +    return res;
>>> +  }
>>> +
>>> +  // Test-only.
>>> +  void TestOnlyUnmap() {
>>> +    UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
>>> +  }
>>> + private:
>>> +  static const uptr kNumClasses = 256;  // Power of two <= 256
>>> +  COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
>>> +  static const uptr kRegionSize = kSpaceSize / kNumClasses;
>>> +  // Populate the free list with at most this number of bytes at once
>>> +  // or with one element if its size is greater.
>>> +  static const uptr kPopulateSize = 1 << 18;
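
Concretely: for a 16-byte size class one refill links (1 << 18) / 16 = 16384 chunks onto the free list at once, while a class larger than kPopulateSize (e.g. 1 MB) gets exactly one chunk per refill.
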
>>> +
>>> +  struct LifoListNode {
>>> +    LifoListNode *next;
>>> +  };
>>> +
>>> +  struct RegionInfo {
>>> +    uptr mutex;  // FIXME
>>> +    LifoListNode *free_list;
>>> +    uptr allocated;
>>> +    char padding[kCacheLineSize -
>>> +                 sizeof(mutex) - sizeof(free_list) - sizeof(allocated)];
>>> +  };
>>> +  COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);
>>> +
>>> +  uptr AdditionalSize() { return sizeof(RegionInfo) * kNumClasses; }
>>> +  uptr AllocBeg()  { return kSpaceBeg  - AdditionalSize(); }
>>>
>>
>> Assert that AdditionalSize() % kPageSize == 0. Otherwise the mmap check
>> will fail.
>>
>>
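A sketch of one place such an assertion could live (Init(), CHECK_EQ and kPageSize are all in the patch; with 256 classes of 64-byte RegionInfo the prefix is 16 KB, so the check would hold):

  void Init() {
    CHECK_EQ(AdditionalSize() % kPageSize, 0);  // mmap'ed prefix must be page-aligned
    CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
             AllocBeg(), AllocSize())));
  }
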
>>> +  uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
>>> +
>>> +  RegionInfo *GetRegionInfo(uptr class_id) {
>>> +    CHECK_LT(class_id, kNumClasses);
>>> +    RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg);
>>> +    return &regions[-1 - class_id];
>>> +  }
>>> +
>>> +  void PushLifoList(LifoListNode **list, LifoListNode *node) {
>>> +    node->next = *list;
>>> +    *list = node;
>>> +  }
>>> +
>>> +  LifoListNode *PopLifoList(LifoListNode **list) {
>>> +    LifoListNode *res = *list;
>>> +    *list = (*list)->next;
>>>
>>
>>                       /\/\/\/\/\/\/\
>>                      res->next
>>
>>
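That is, the pop can reuse the head pointer it has already loaded instead of re-reading *list; a sketch:

  LifoListNode *PopLifoList(LifoListNode **list) {
    LifoListNode *res = *list;
    *list = res->next;  // same value as (*list)->next, but no second dereference
    return res;
  }
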
>>> +    return res;
>>> +  }
>>> +
>>> +  LifoListNode *PopulateFreeList(uptr class_id, RegionInfo *region) {
>>> +    uptr size = SizeClassMap::Size(class_id);
>>> +    uptr beg_idx = region->allocated;
>>> +    uptr end_idx = beg_idx + kPopulateSize;
>>> +    LifoListNode *res = 0;
>>> +    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
>>> +    uptr idx = beg_idx;
>>> +    do {  // do-while loop because we need to put at least one item.
>>> +      uptr p = region_beg + idx;
>>> +      PushLifoList(&res, reinterpret_cast<LifoListNode*>(p));
>>> +      idx += size;
>>> +    } while (idx < end_idx);
>>> +    CHECK_LT(idx, kRegionSize);
>>>
>>
>> Better to place this check after calculation of end_idx, because at this
>> point we've already corrupted some memory.
>>
>>
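A sketch with the check hoisted above the loop (the bound gains a "+ size" because the do-while can overshoot end_idx by less than one chunk):

  LifoListNode *PopulateFreeList(uptr class_id, RegionInfo *region) {
    uptr size = SizeClassMap::Size(class_id);
    uptr beg_idx = region->allocated;
    uptr end_idx = beg_idx + kPopulateSize;
    // Check before any chunk is written, not after.
    CHECK_LE(end_idx + size, kRegionSize);
    LifoListNode *res = 0;
    uptr region_beg = kSpaceBeg + kRegionSize * class_id;
    uptr idx = beg_idx;
    do {  // do-while loop because we need to put at least one item.
      uptr p = region_beg + idx;
      PushLifoList(&res, reinterpret_cast<LifoListNode*>(p));
      idx += size;
    } while (idx < end_idx);
    region->allocated += idx - beg_idx;
    return res;
  }
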
>>> +    region->allocated += idx - beg_idx;
>>> +    return res;
>>> +  }
>>> +
>>> +  void *AllocateBySizeClass(uptr class_id) {
>>> +    CHECK_LT(class_id, kNumClasses);
>>> +    RegionInfo *region = GetRegionInfo(class_id);
>>> +    // FIXME: Lock region->mutex;
>>> +    if (!region->free_list) {
>>> +      region->free_list = PopulateFreeList(class_id, region);
>>> +    }
>>> +    CHECK_NE(region->free_list, 0);
>>> +    LifoListNode *node = PopLifoList(&region->free_list);
>>> +    return reinterpret_cast<void*>(node);
>>>
>>
>> static_cast is enough here
>>
>>
>>> +  }
>>> +
>>> +  void DeallocateBySizeClass(void *p, uptr class_id) {
>>> +    RegionInfo *region = GetRegionInfo(class_id);
>>> +    // FIXME: Lock region->mutex;
>>> +    PushLifoList(&region->free_list, reinterpret_cast<LifoListNode*>(p));
>>>
>>
>> static_cast is enough here
>>
>>
>>> +  }
>>> +};
>>> +
>>>  }  // namespace __sanitizer
>>>
>>>  #endif  // SANITIZER_ALLOCATOR_H
>>>
>>> Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h
>>> URL:
>>> http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h?rev=158991&r1=158990&r2=158991&view=diff
>>>
>>> ==============================================================================
>>> --- compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h (original)
>>> +++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h Fri Jun 22 08:00:50 2012
>>> @@ -25,6 +25,7 @@
>>>  const uptr kWordSizeInBits = 8 * kWordSize;
>>>  const uptr kPageSizeBits = 12;
>>>  const uptr kPageSize = 1UL << kPageSizeBits;
>>> +const uptr kCacheLineSize = 64;
>>>  #ifndef _WIN32
>>>  const uptr kMmapGranularity = kPageSize;
>>>  #else
>>>
>>> Modified:
>>> compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
>>> URL:
>>> http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc?rev=158991&r1=158990&r2=158991&view=diff
>>>
>>> ==============================================================================
>>> --- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc (original)
>>> +++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc Fri Jun 22 08:00:50 2012
>>> @@ -11,9 +11,13 @@
>>>  #include "sanitizer_common/sanitizer_allocator64.h"
>>>  #include "gtest/gtest.h"
>>>
>>> +#include <algorithm>
>>> +#include <vector>
>>> +
>>>  TEST(SanitizerCommon, DefaultSizeClassMap) {
>>>   typedef DefaultSizeClassMap SCMap;
>>>
>>> +#if 0
>>>   for (uptr i = 0; i < SCMap::kNumClasses; i++) {
>>>     // printf("% 3ld: % 5ld (%4lx);   ", i, SCMap::Size(i), SCMap::Size(i));
>>>     printf("c%ld => %ld  ", i, SCMap::Size(i));
>>> @@ -21,23 +25,69 @@
>>>       printf("\n");
>>>   }
>>>   printf("\n");
>>> +#endif
>>>
>>>   for (uptr c = 0; c < SCMap::kNumClasses; c++) {
>>>     uptr s = SCMap::Size(c);
>>> -    CHECK_EQ(SCMap::Class(s), c);
>>> +    CHECK_EQ(SCMap::ClassID(s), c);
>>>     if (c != SCMap::kNumClasses - 1)
>>> -      CHECK_EQ(SCMap::Class(s + 1), c + 1);
>>> -    CHECK_EQ(SCMap::Class(s - 1), c);
>>> +      CHECK_EQ(SCMap::ClassID(s + 1), c + 1);
>>> +    CHECK_EQ(SCMap::ClassID(s - 1), c);
>>>     if (c)
>>>       CHECK_GT(SCMap::Size(c), SCMap::Size(c-1));
>>>   }
>>> -  CHECK_EQ(SCMap::Class(SCMap::kMaxSize + 1), 0);
>>> +  CHECK_EQ(SCMap::ClassID(SCMap::kMaxSize + 1), 0);
>>>
>>>   for (uptr s = 1; s <= SCMap::kMaxSize; s++) {
>>> -    uptr c = SCMap::Class(s);
>>> +    uptr c = SCMap::ClassID(s);
>>>     CHECK_LT(c, SCMap::kNumClasses);
>>>     CHECK_GE(SCMap::Size(c), s);
>>>     if (c > 0)
>>>       CHECK_LT(SCMap::Size(c-1), s);
>>>   }
>>>  }
>>> +
>>> +TEST(SanitizerCommon, SizeClassAllocator64) {
>>> +  const uptr space_beg  = 0x600000000000ULL;
>>> +  const uptr space_size = 0x10000000000;  // 1T
>>> +  const uptr metadata_size = 16;
>>> +  typedef DefaultSizeClassMap SCMap;
>>> +  typedef SizeClassAllocator64<space_beg, space_size,
>>> +                               metadata_size, SCMap> Allocator;
>>> +
>>> +  Allocator a;
>>> +  a.Init();
>>> +
>>> +  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
>>> +    50000, 60000, 100000, 300000, 500000, 1000000, 2000000};
>>> +
>>> +  std::vector<void *> allocated;
>>> +
>>> +  uptr last_total_allocated = 0;
>>> +  for (int i = 0; i < 5; i++) {
>>> +    // Allocate a bunch of chunks.
>>> +    for (uptr s = 0; s < sizeof(sizes) /sizeof(sizes[0]); s++) {
>>> +      uptr size = sizes[s];
>>> +      // printf("s = %ld\n", size);
>>> +      uptr n_iter = std::max((uptr)2, 1000000 / size);
>>> +      for (uptr i = 0; i < n_iter; i++) {
>>> +        void *x = a.Allocate(size);
>>> +        allocated.push_back(x);
>>> +        CHECK(a.PointerIsMine(x));
>>> +        uptr class_id = a.GetSizeClass(x);
>>> +        CHECK_EQ(class_id, SCMap::ClassID(size));
>>> +      }
>>> +    }
>>> +    // Deallocate all.
>>> +    for (uptr i = 0; i < allocated.size(); i++) {
>>> +      a.Deallocate(allocated[i]);
>>> +    }
>>> +    allocated.clear();
>>> +    uptr total_allocated = a.TotalMemoryUsedIncludingFreeLists();
>>> +    if (last_total_allocated == 0)
>>> +      last_total_allocated = total_allocated;
>>> +    CHECK_EQ(last_total_allocated, total_allocated);
>>> +  }
>>> +
>>> +  a.TestOnlyUnmap();
>>> +}
>>>
>>>
>>> _______________________________________________
>>> llvm-commits mailing list
>>> llvm-commits at cs.uiuc.edu
>>> http://lists.cs.uiuc.edu/mailman/listinfo/llvm-commits
>>>
>>
>>
>
> Bot is broken:
>
> In file included from ../sanitizer_common/tests/sanitizer_allocator64_test.cc:11:
> ../sanitizer_common/sanitizer_allocator64.h:153:25: error: invalid use of non-static data member 'mutex'
>                  sizeof(mutex) - sizeof(free_list) -
>                         ^~~~~
> ../sanitizer_common/sanitizer_allocator64.h:153:41: error: invalid use of non-static data member 'free_list'
>                  sizeof(mutex) - sizeof(free_list) -
>                                         ^~~~~~~~~
> ../sanitizer_common/sanitizer_allocator64.h:154:25: error: invalid use of non-static data member 'allocated_user'
>                  sizeof(allocated_user) - sizeof(allocated_meta)];
>                         ^~~~~~~~~~~~~~
> ../sanitizer_common/sanitizer_allocator64.h:154:50: error: invalid use of non-static data member 'allocated_meta'
>                  sizeof(allocated_user) - sizeof(allocated_meta)];
>                                                  ^~~~~~~~~~~~~~
>
>
Fixed. r159137.
I was building the tests with gcc and did not see this failure.  My bad.
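
For reference, the usual way around this clang error (sizeof on a non-static data member without an object is only allowed from C++11 on) is to spell the padding in terms of the member types. A sketch, reconstructed from the error output above and not necessarily what r159137 does:

  struct RegionInfo {
    uptr mutex;  // FIXME
    LifoListNode *free_list;
    uptr allocated_user;
    uptr allocated_meta;
    // sizeof(type) instead of sizeof(member) keeps this valid pre-C++11.
    char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(LifoListNode*)];
  };
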

--kcc
