[compiler-rt] r277899 - [sanitizer] allocator: move TransferBatch into SizeClassAllocator64/SizeClassAllocator32 because we actually need different implementations for the 64- and 32-bit case. NFC; the following patches will make the TransferBatch implementations differ
Kostya Serebryany via llvm-commits
llvm-commits at lists.llvm.org
Fri Aug 5 18:24:12 PDT 2016
Author: kcc
Date: Fri Aug 5 20:24:11 2016
New Revision: 277899
URL: http://llvm.org/viewvc/llvm-project?rev=277899&view=rev
Log:
[sanitizer] allocator: move TransferBatch into SizeClassAllocator64/SizeClassAllocator32 because we actually need different implementations for the 64- and 32-bit case. NFC; the following patches will make the TransferBatch implementations differ
Modified:
compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h
compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h
compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary64.h
compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
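For readers skimming the diffs below: before this patch SizeClassAllocatorLocalCache reached TransferBatch through SizeClassMap; after it, the batch type is a member of the allocator itself, so the 64- and 32-bit primaries are free to diverge. A minimal stand-alone sketch of that dependency change, using toy stand-ins rather than the real compiler-rt headers:

// Toy stand-ins for the real compiler-rt types; only the dependency
// direction matters here.
typedef unsigned long uptr;

struct ToySizeClassMap {
  static uptr Size(uptr class_id) { return 16 * class_id; }  // toy sizing
};

template <class SizeClassMap>
struct ToyAllocator64 {
  // TransferBatch now lives in the allocator, so SizeClassAllocator64 and
  // SizeClassAllocator32 can later give it different layouts.
  struct TransferBatch {
    static const uptr kMaxNumCached = 64;
    TransferBatch *next;
    uptr count;
    void *batch[kMaxNumCached];
  };
  static uptr ClassIdToSize(uptr class_id) {
    return SizeClassMap::Size(class_id);
  }
};

template <class Allocator>
struct ToyLocalCache {
  // The cache only names Allocator::TransferBatch; it no longer depends on
  // the size class map for the batch layout.
  typedef typename Allocator::TransferBatch TransferBatch;
};

int main() {
  ToyLocalCache<ToyAllocator64<ToySizeClassMap> > cache;
  (void)cache;
  return 0;
}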
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h?rev=277899&r1=277898&r2=277899&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h Fri Aug 5 20:24:11 2016
@@ -20,6 +20,7 @@
template<class SizeClassAllocator>
struct SizeClassAllocatorLocalCache {
typedef SizeClassAllocator Allocator;
+ typedef typename Allocator::TransferBatch TransferBatch;
static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
void Init(AllocatorGlobalStats *s) {
@@ -37,7 +38,7 @@ struct SizeClassAllocatorLocalCache {
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
+ stats_.Add(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0))
Refill(allocator, class_id);
@@ -52,7 +53,7 @@ struct SizeClassAllocatorLocalCache {
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
- stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
+ stats_.Sub(AllocatorStatAllocated, Allocator::ClassIdToSize(class_id));
PerClass *c = &per_class_[class_id];
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
@@ -70,7 +71,6 @@ struct SizeClassAllocatorLocalCache {
// private:
typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
- typedef typename SizeClassMap::TransferBatch Batch;
struct PerClass {
uptr count;
uptr max_count;
@@ -88,27 +88,57 @@ struct SizeClassAllocatorLocalCache {
}
}
- // Returns a Batch suitable for class_id.
+ // TransferBatch class is declared in SizeClassAllocator.
+ // We transfer chunks between central and thread-local free lists in batches.
+ // For small size classes we allocate batches separately.
+ // For large size classes we may use one of the chunks to store the batch.
+ // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
+
+ // If kUseSeparateSizeClassForBatch is true,
+ // all TransferBatch objects are allocated from kBatchClassID
+ // size class (except for those that are needed for kBatchClassID itself).
+ // The goal is to have TransferBatches in a totally different region of RAM
+ // to improve security and allow more efficient RAM reclamation.
+ // This is experimental and may currently increase memory usage by up to 3%
+ // in extreme cases.
+ static const bool kUseSeparateSizeClassForBatch = false;
+
+ static uptr SizeClassForTransferBatch(uptr class_id) {
+ if (kUseSeparateSizeClassForBatch)
+ return class_id == SizeClassMap::kBatchClassID
+ ? 0
+ : SizeClassMap::kBatchClassID;
+ if (Allocator::ClassIdToSize(class_id) <
+ sizeof(TransferBatch) -
+ sizeof(uptr) * (SizeClassMap::kMaxNumCached -
+ SizeClassMap::MaxCached(class_id)))
+ return SizeClassMap::ClassID(sizeof(TransferBatch));
+ return 0;
+ }
+
+ // Returns a TransferBatch suitable for class_id.
// For small size classes allocates the batch from the allocator.
// For large size classes simply returns b.
- Batch *CreateBatch(uptr class_id, SizeClassAllocator *allocator, Batch *b) {
- if (uptr batch_class_id = SizeClassMap::SizeClassForTransferBatch(class_id))
- return (Batch*)Allocate(allocator, batch_class_id);
+ TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
+ return (TransferBatch*)Allocate(allocator, batch_class_id);
return b;
}
- // Destroys Batch b.
+ // Destroys TransferBatch b.
// For small size classes deallocates b to the allocator.
// Does nothing for large size classes.
- void DestroyBatch(uptr class_id, SizeClassAllocator *allocator, Batch *b) {
- if (uptr batch_class_id = SizeClassMap::SizeClassForTransferBatch(class_id))
+ void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
+ TransferBatch *b) {
+ if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
Deallocate(allocator, batch_class_id, b);
}
NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
InitCache();
PerClass *c = &per_class_[class_id];
- Batch *b = allocator->AllocateBatch(&stats_, this, class_id);
+ TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
CHECK_GT(b->Count(), 0);
for (uptr i = 0; i < b->Count(); i++)
c->batch[i] = b->Get(i);
@@ -121,8 +151,8 @@ struct SizeClassAllocatorLocalCache {
PerClass *c = &per_class_[class_id];
uptr cnt = Min(c->max_count / 2, c->count);
uptr first_idx_to_drain = c->count - cnt;
- Batch *b =
- CreateBatch(class_id, allocator, (Batch *)c->batch[first_idx_to_drain]);
+ TransferBatch *b = CreateBatch(
+ class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
b->SetFromArray(&c->batch[first_idx_to_drain], cnt);
c->count -= cnt;
allocator->DeallocateBatch(&stats_, class_id, b);
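The CreateBatch/DestroyBatch pair above decides where a TransferBatch lives: for small size classes the batch gets its own allocation from the class that fits sizeof(TransferBatch), while for large classes the first user chunk is big enough to double as the batch and SizeClassForTransferBatch returns 0. A toy version of that arithmetic, with made-up constants smaller than the real ones:

// Toy version of the SizeClassForTransferBatch decision.
#include <cstdio>

typedef unsigned long uptr;

static const uptr kMaxNumCached = 64;

struct ToyBatch {
  ToyBatch *next;
  uptr count;
  void *batch[kMaxNumCached];
};

// Toy MaxCached: small chunks are cached in larger numbers than big ones.
static uptr MaxCached(uptr chunk_size) {
  return chunk_size < 256 ? kMaxNumCached : 8;
}

// Nonzero result: "allocate the batch separately"; zero: "the first user
// chunk is big enough to double as the batch".
static uptr NeedsSeparateBatch(uptr chunk_size) {
  uptr used_batch_size =
      sizeof(ToyBatch) - sizeof(uptr) * (kMaxNumCached - MaxCached(chunk_size));
  return chunk_size < used_batch_size ? 1 : 0;
}

int main() {
  printf("32-byte chunks:   separate batch = %lu\n", NeedsSeparateBatch(32));
  printf("4096-byte chunks: separate batch = %lu\n", NeedsSeparateBatch(4096));
  return 0;
}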
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h?rev=277899&r1=277898&r2=277899&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h Fri Aug 5 20:24:11 2016
@@ -41,7 +41,40 @@ template <const uptr kSpaceBeg, const u6
class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator32 {
public:
- typedef typename SizeClassMap::TransferBatch Batch;
+ struct TransferBatch {
+ static const uptr kMaxNumCached = SizeClassMap::kMaxNumCached;
+ void SetFromArray(void *batch[], uptr count) {
+ count_ = count;
+ CHECK_LE(count_, kMaxNumCached);
+ for (uptr i = 0; i < count; i++)
+ batch_[i] = batch[i];
+ }
+ void *Get(uptr idx) {
+ CHECK_LT(idx, count_);
+ return batch_[idx];
+ }
+ uptr Count() const { return count_; }
+ void Clear() { count_ = 0; }
+ void Add(void *ptr) {
+ batch_[count_++] = ptr;
+ CHECK_LE(count_, kMaxNumCached);
+ }
+ TransferBatch *next;
+
+ private:
+ uptr count_;
+ void *batch_[kMaxNumCached];
+ };
+
+ static const uptr kBatchSize = sizeof(TransferBatch);
+ COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
+
+ static uptr ClassIdToSize(uptr class_id) {
+ return class_id == SizeClassMap::kBatchClassID
+ ? sizeof(TransferBatch)
+ : SizeClassMap::Size(class_id);
+ }
+
typedef SizeClassAllocator32<kSpaceBeg, kSpaceSize, kMetadataSize,
SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
@@ -72,27 +105,28 @@ class SizeClassAllocator32 {
CHECK(PointerIsMine(p));
uptr mem = reinterpret_cast<uptr>(p);
uptr beg = ComputeRegionBeg(mem);
- uptr size = SizeClassMap::Size(GetSizeClass(p));
+ uptr size = ClassIdToSize(GetSizeClass(p));
u32 offset = mem - beg;
uptr n = offset / (u32)size; // 32-bit division
uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
return reinterpret_cast<void*>(meta);
}
- NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id) {
+ NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
+ uptr class_id) {
CHECK_LT(class_id, kNumClasses);
SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex);
if (sci->free_list.empty())
PopulateFreeList(stat, c, sci, class_id);
CHECK(!sci->free_list.empty());
- Batch *b = sci->free_list.front();
+ TransferBatch *b = sci->free_list.front();
sci->free_list.pop_front();
return b;
}
- NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
+ NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
+ TransferBatch *b) {
CHECK_LT(class_id, kNumClasses);
SizeClassInfo *sci = GetSizeClassInfo(class_id);
SpinMutexLock l(&sci->mutex);
@@ -115,7 +149,7 @@ class SizeClassAllocator32 {
CHECK(PointerIsMine(p));
uptr mem = reinterpret_cast<uptr>(p);
uptr beg = ComputeRegionBeg(mem);
- uptr size = SizeClassMap::Size(GetSizeClass(p));
+ uptr size = ClassIdToSize(GetSizeClass(p));
u32 offset = mem - beg;
u32 n = offset / (u32)size; // 32-bit division
uptr res = beg + (n * (u32)size);
@@ -124,7 +158,7 @@ class SizeClassAllocator32 {
uptr GetActuallyAllocatedSize(void *p) {
CHECK(PointerIsMine(p));
- return SizeClassMap::Size(GetSizeClass(p));
+ return ClassIdToSize(GetSizeClass(p));
}
uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
@@ -163,7 +197,7 @@ class SizeClassAllocator32 {
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
for (uptr region = 0; region < kNumPossibleRegions; region++)
if (possible_regions[region]) {
- uptr chunk_size = SizeClassMap::Size(possible_regions[region]);
+ uptr chunk_size = ClassIdToSize(possible_regions[region]);
uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
uptr region_beg = region * kRegionSize;
for (uptr chunk = region_beg;
@@ -191,8 +225,9 @@ class SizeClassAllocator32 {
struct SizeClassInfo {
SpinMutex mutex;
- IntrusiveList<Batch> free_list;
- char padding[kCacheLineSize - sizeof(uptr) - sizeof(IntrusiveList<Batch>)];
+ IntrusiveList<TransferBatch> free_list;
+ char padding[kCacheLineSize - sizeof(uptr) -
+ sizeof(IntrusiveList<TransferBatch>)];
};
COMPILER_CHECK(sizeof(SizeClassInfo) == kCacheLineSize);
@@ -224,14 +259,14 @@ class SizeClassAllocator32 {
void PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
SizeClassInfo *sci, uptr class_id) {
- uptr size = SizeClassMap::Size(class_id);
+ uptr size = ClassIdToSize(class_id);
uptr reg = AllocateRegion(stat, class_id);
uptr n_chunks = kRegionSize / (size + kMetadataSize);
uptr max_count = SizeClassMap::MaxCached(class_id);
- Batch *b = nullptr;
+ TransferBatch *b = nullptr;
for (uptr i = reg; i < reg + n_chunks * size; i += size) {
if (!b) {
- b = c->CreateBatch(class_id, this, (Batch*)i);
+ b = c->CreateBatch(class_id, this, (TransferBatch*)i);
b->Clear();
}
b->Add((void*)i);
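PopulateFreeList above maps a region, slices it into fixed-size chunks and strings those chunks into TransferBatch objects for the central free list. A heap-based toy of the same carving loop, with std::vector standing in for the mmap'd region and the intrusive batch list:

// Carve a toy "region" into equal chunks and group them into batches, the
// same shape of loop PopulateFreeList uses.
#include <cstdio>
#include <vector>

int main() {
  const unsigned long kRegionSize = 4096;  // toy region
  const unsigned long kChunkSize = 128;    // toy size class
  const unsigned long kMaxPerBatch = 16;   // toy MaxCached(class_id)

  std::vector<char> region(kRegionSize);
  std::vector<std::vector<void *> > batches;

  std::vector<void *> current;
  for (unsigned long off = 0; off + kChunkSize <= kRegionSize;
       off += kChunkSize) {
    current.push_back(&region[off]);
    if (current.size() == kMaxPerBatch) {  // full batch: hand to central list
      batches.push_back(current);
      current.clear();
    }
  }
  if (!current.empty()) batches.push_back(current);

  printf("chunks: %lu, batches: %lu\n", kRegionSize / kChunkSize,
         (unsigned long)batches.size());
  return 0;
}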
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary64.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary64.h?rev=277899&r1=277898&r2=277899&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary64.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary64.h Fri Aug 5 20:24:11 2016
@@ -36,7 +36,40 @@ template <const uptr kSpaceBeg, const up
class MapUnmapCallback = NoOpMapUnmapCallback>
class SizeClassAllocator64 {
public:
- typedef typename SizeClassMap::TransferBatch Batch;
+ struct TransferBatch {
+ static const uptr kMaxNumCached = SizeClassMap::kMaxNumCached;
+ void SetFromRange(uptr region_beg, uptr beg_offset, uptr step, uptr count) {
+ count_ = count;
+ CHECK_LE(count_, kMaxNumCached);
+ for (uptr i = 0; i < count; i++)
+ batch_[i] = (void*)(region_beg + beg_offset + i * step);
+ }
+ void SetFromArray(void *batch[], uptr count) {
+ count_ = count;
+ CHECK_LE(count_, kMaxNumCached);
+ for (uptr i = 0; i < count; i++)
+ batch_[i] = batch[i];
+ }
+ void *Get(uptr idx) {
+ CHECK_LT(idx, count_);
+ return batch_[idx];
+ }
+ uptr Count() const { return count_; }
+ TransferBatch *next;
+
+ private:
+ uptr count_;
+ void *batch_[kMaxNumCached];
+ };
+ static const uptr kBatchSize = sizeof(TransferBatch);
+ COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
+
+ static uptr ClassIdToSize(uptr class_id) {
+ return class_id == SizeClassMap::kBatchClassID
+ ? sizeof(TransferBatch)
+ : SizeClassMap::Size(class_id);
+ }
+
typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
SizeClassMap, MapUnmapCallback> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> AllocatorCache;
@@ -69,18 +102,19 @@ class SizeClassAllocator64 {
alignment <= SizeClassMap::kMaxSize;
}
- NOINLINE Batch* AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id) {
+ NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
+ uptr class_id) {
CHECK_LT(class_id, kNumClasses);
RegionInfo *region = GetRegionInfo(class_id);
- Batch *b = region->free_list.Pop();
+ TransferBatch *b = region->free_list.Pop();
if (!b)
b = PopulateFreeList(stat, c, class_id, region);
region->n_allocated += b->Count();
return b;
}
- NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id, Batch *b) {
+ NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
+ TransferBatch *b) {
RegionInfo *region = GetRegionInfo(class_id);
CHECK_GT(b->Count(), 0);
region->free_list.Push(b);
@@ -111,7 +145,7 @@ class SizeClassAllocator64 {
void *GetBlockBegin(const void *p) {
uptr class_id = GetSizeClass(p);
- uptr size = SizeClassMap::Size(class_id);
+ uptr size = ClassIdToSize(class_id);
if (!size) return nullptr;
uptr chunk_idx = GetChunkIdx((uptr)p, size);
uptr reg_beg = GetRegionBegin(p);
@@ -126,14 +160,14 @@ class SizeClassAllocator64 {
uptr GetActuallyAllocatedSize(void *p) {
CHECK(PointerIsMine(p));
- return SizeClassMap::Size(GetSizeClass(p));
+ return ClassIdToSize(GetSizeClass(p));
}
uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
void *GetMetaData(const void *p) {
uptr class_id = GetSizeClass(p);
- uptr size = SizeClassMap::Size(class_id);
+ uptr size = ClassIdToSize(class_id);
uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
return reinterpret_cast<void *>(SpaceBeg() +
(kRegionSize * (class_id + 1)) -
@@ -180,11 +214,11 @@ class SizeClassAllocator64 {
RegionInfo *region = GetRegionInfo(class_id);
if (region->mapped_user == 0) continue;
uptr in_use = region->n_allocated - region->n_freed;
- uptr avail_chunks = region->allocated_user / SizeClassMap::Size(class_id);
+ uptr avail_chunks = region->allocated_user / ClassIdToSize(class_id);
Printf(" %02zd (%zd): mapped: %zdK allocs: %zd frees: %zd inuse: %zd"
" avail: %zd rss: %zdK\n",
class_id,
- SizeClassMap::Size(class_id),
+ ClassIdToSize(class_id),
region->mapped_user >> 10,
region->n_allocated,
region->n_freed,
@@ -212,7 +246,7 @@ class SizeClassAllocator64 {
void ForEachChunk(ForEachChunkCallback callback, void *arg) {
for (uptr class_id = 1; class_id < kNumClasses; class_id++) {
RegionInfo *region = GetRegionInfo(class_id);
- uptr chunk_size = SizeClassMap::Size(class_id);
+ uptr chunk_size = ClassIdToSize(class_id);
uptr region_beg = SpaceBeg() + class_id * kRegionSize;
for (uptr chunk = region_beg;
chunk < region_beg + region->allocated_user;
@@ -250,7 +284,7 @@ class SizeClassAllocator64 {
struct RegionInfo {
BlockingMutex mutex;
- LFStack<Batch> free_list;
+ LFStack<TransferBatch> free_list;
uptr allocated_user; // Bytes allocated for user memory.
uptr allocated_meta; // Bytes allocated for metadata.
uptr mapped_user; // Bytes mapped for user memory.
@@ -278,13 +312,14 @@ class SizeClassAllocator64 {
return (u32)offset / (u32)size;
}
- NOINLINE Batch* PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
- uptr class_id, RegionInfo *region) {
+ NOINLINE TransferBatch *PopulateFreeList(AllocatorStats *stat,
+ AllocatorCache *c, uptr class_id,
+ RegionInfo *region) {
BlockingMutexLock l(&region->mutex);
- Batch *b = region->free_list.Pop();
+ TransferBatch *b = region->free_list.Pop();
if (b)
return b;
- uptr size = SizeClassMap::Size(class_id);
+ uptr size = ClassIdToSize(class_id);
uptr count = SizeClassMap::MaxCached(class_id);
uptr beg_idx = region->allocated_user;
uptr end_idx = beg_idx + count * size;
@@ -320,7 +355,8 @@ class SizeClassAllocator64 {
Die();
}
for (;;) {
- b = c->CreateBatch(class_id, this, (Batch*)(region_beg + beg_idx));
+ b = c->CreateBatch(class_id, this,
+ (TransferBatch *)(region_beg + beg_idx));
b->SetFromRange(region_beg, beg_idx, size, count);
region->allocated_user += count * size;
CHECK_LE(region->allocated_user, region->mapped_user);
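The 64-bit TransferBatch above keeps a SetFromRange() helper: a freshly mapped run of chunks is contiguous, so a batch can be filled from (region_beg, beg_offset, step, count) instead of copying an explicit pointer array. A toy illustration with invented addresses:

// Toy SetFromRange: fill a batch from a contiguous run of chunk addresses.
#include <cstdio>

typedef unsigned long uptr;

struct ToyBatch {
  static const uptr kMaxNumCached = 64;
  uptr count_;
  void *batch_[kMaxNumCached];
  void SetFromRange(uptr region_beg, uptr beg_offset, uptr step, uptr count) {
    count_ = count;
    for (uptr i = 0; i < count; i++)
      batch_[i] = (void *)(region_beg + beg_offset + i * step);
  }
};

int main() {
  // Pretend a region starts at 0x10000 and holds 32-byte chunks.
  ToyBatch b;
  b.SetFromRange(0x10000, 0, 32, 4);
  for (uptr i = 0; i < b.count_; i++)
    printf("chunk %lu at %p\n", i, b.batch_[i]);
  return 0;
}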
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_size_class_map.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_size_class_map.h?rev=277899&r1=277898&r2=277899&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_size_class_map.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_size_class_map.h Fri Aug 5 20:24:11 2016
@@ -91,55 +91,12 @@ class SizeClassMap {
public:
static const uptr kMaxNumCached = kMaxNumCachedT;
COMPILER_CHECK(((kMaxNumCached + 2) & (kMaxNumCached + 1)) == 0);
- // We transfer chunks between central and thread-local free lists in batches.
- // For small size classes we allocate batches separately.
- // For large size classes we use one of the chunks to store the batch.
- // sizeof(TransferBatch) must be a power of 2 for more efficient allocation.
- struct TransferBatch {
- void SetFromRange(uptr region_beg, uptr beg_offset, uptr step, uptr count) {
- count_ = count;
- CHECK_LE(count_, kMaxNumCached);
- for (uptr i = 0; i < count; i++)
- batch_[i] = (void*)(region_beg + beg_offset + i * step);
- }
- void SetFromArray(void *batch[], uptr count) {
- count_ = count;
- CHECK_LE(count_, kMaxNumCached);
- for (uptr i = 0; i < count; i++)
- batch_[i] = batch[i];
- }
- void *Get(uptr idx) {
- CHECK_LT(idx, count_);
- return batch_[idx];
- }
- uptr Count() const { return count_; }
- void Clear() { count_ = 0; }
- void Add(void *ptr) {
- batch_[count_++] = ptr;
- CHECK_LE(count_, kMaxNumCached);
- }
- TransferBatch *next;
-
- private:
- uptr count_;
- void *batch_[kMaxNumCached];
- };
- static const uptr kBatchSize = sizeof(TransferBatch);
- COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
-
- // If true, all TransferBatch objects are allocated from kBatchClassID
- // size class (except for those that are needed for kBatchClassID itself).
- // The goal is to have TransferBatches in a totally different region of RAM
- // to improve security and allow more efficient RAM reclamation.
- // This is experimental and may currently increase memory usage by up to 3%
- // in extreme cases.
- static const bool kUseSeparateSizeClassForBatch = false;
-
static const uptr kMaxSize = 1UL << kMaxSizeLog;
static const uptr kNumClasses =
kMidClass + ((kMaxSizeLog - kMidSizeLog) << S) + 1 + 1;
static const uptr kBatchClassID = kNumClasses - 1;
+ static const uptr kLargestClassID = kNumClasses - 2;
COMPILER_CHECK(kNumClasses >= 32 && kNumClasses <= 256);
static const uptr kNumClassesRounded =
kNumClasses == 32 ? 32 :
@@ -149,8 +106,8 @@ class SizeClassMap {
static uptr Size(uptr class_id) {
if (class_id <= kMidClass)
return kMinSize * class_id;
- if (class_id == kBatchClassID)
- return kBatchSize;
+ // Should not pass kBatchClassID here, but we should avoid a CHECK.
+ if (class_id == kBatchClassID) return 0;
class_id -= kMidClass;
uptr t = kMidSize << (class_id >> S);
return t + (t >> S) * (class_id & M);
@@ -169,6 +126,11 @@ class SizeClassMap {
static uptr MaxCached(uptr class_id) {
if (class_id == 0) return 0;
+ // Estimate the result for kBatchClassID because this class
+ // does not know the exact size of TransferBatch.
+ // Moreover, we need to cache fewer batches than user chunks,
+ // so this number could be small.
+ if (class_id == kBatchClassID) return Min((uptr)8, kMaxNumCached);
uptr n = (1UL << kMaxBytesCachedLog) / Size(class_id);
return Max<uptr>(1, Min(kMaxNumCached, n));
}
@@ -195,15 +157,6 @@ class SizeClassMap {
Printf("Total cached: %zd\n", total_cached);
}
- static uptr SizeClassForTransferBatch(uptr class_id) {
- if (kUseSeparateSizeClassForBatch)
- return class_id == kBatchClassID ? 0 : kBatchClassID;
- if (Size(class_id) < sizeof(TransferBatch) -
- sizeof(uptr) * (kMaxNumCached - MaxCached(class_id)))
- return ClassID(sizeof(TransferBatch));
- return 0;
- }
-
static void Validate() {
for (uptr c = 1; c < kNumClasses; c++) {
if (c == kBatchClassID) continue;
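After this patch the size class map no longer knows sizeof(TransferBatch): Size(kBatchClassID) returns 0 and each allocator patches that hole through its own ClassIdToSize(). A toy of that split, with invented constants; this is also why the test below switches from kNumClasses - 1 to kLargestClassID, the largest class that still describes user chunks:

// Toy split of responsibilities: the map reports 0 for the batch class, the
// allocator substitutes sizeof of its own TransferBatch.
#include <cstdio>

typedef unsigned long uptr;

struct ToySizeClassMap {
  static const uptr kNumClasses = 53;
  static const uptr kBatchClassID = kNumClasses - 1;
  static const uptr kLargestClassID = kNumClasses - 2;
  static uptr Size(uptr class_id) {
    if (class_id == kBatchClassID) return 0;  // map no longer sizes batches
    return 16 * class_id;                     // toy sizing rule
  }
};

struct ToyAllocator {
  struct TransferBatch { void *next; uptr count; void *batch[64]; };
  static uptr ClassIdToSize(uptr class_id) {
    return class_id == ToySizeClassMap::kBatchClassID
               ? sizeof(TransferBatch)
               : ToySizeClassMap::Size(class_id);
  }
};

int main() {
  uptr batch_id = ToySizeClassMap::kBatchClassID;
  printf("map:       Size(kBatchClassID)          = %lu\n",
         ToySizeClassMap::Size(batch_id));
  printf("allocator: ClassIdToSize(kBatchClassID) = %lu\n",
         ToyAllocator::ClassIdToSize(batch_id));
  return 0;
}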
Modified: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc?rev=277899&r1=277898&r2=277899&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc Fri Aug 5 20:24:11 2016
@@ -239,7 +239,7 @@ void SizeClassAllocatorGetBlockBeginStre
memset(&cache, 0, sizeof(cache));
cache.Init(0);
- uptr max_size_class = Allocator::kNumClasses - 1;
+ uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;
uptr size = Allocator::SizeClassMapT::Size(max_size_class);
u64 G8 = 1ULL << 33;
// Make sure we correctly compute GetBlockBegin() w/o overflow.