<div dir="ltr">Reverted by r279643</div><br><div class="gmail_quote"><div dir="ltr">On Wed, Aug 24, 2016 at 8:21 AM Bill Seurer via llvm-commits <<a href="mailto:llvm-commits@lists.llvm.org">llvm-commits@lists.llvm.org</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">This breaks on powerpc, too. Right now it looks like all the powerpc<br>
bots are failing because of this. r279571 works fine and r279572 causes<br>
the failure.<br>
<br>
On 08/24/16 02:32, Vitaly Buka via llvm-commits wrote:<br>
> ubsan test is broken after this patch:
> http://lab.llvm.org:8011/builders/sanitizer-x86_64-linux/builds/25144/steps/check-ubsan%20in%20gcc%20build/logs/stdio
> I tried reverting locally, and that resolved the issue.
>
> On Tue, Aug 23, 2016 at 2:27 PM Kostya Serebryany via llvm-commits <llvm-commits@lists.llvm.org> wrote:
>
> Author: kcc
> Date: Tue Aug 23 16:19:47 2016
> New Revision: 279572
>
> URL: http://llvm.org/viewvc/llvm-project?rev=279572&view=rev
> Log:
> [sanitizer] change the 64-bit allocator to use a single array for
> free-d chunks instead of a lock-free linked list of transfer batches.
> This change simplifies the code, makes the allocator more
> 'hardened', and will allow simpler code to release RAM to the OS. This
> may also slow down malloc stress tests due to lock contention, but I
> did not observe a noticeable slowdown on various real multi-threaded
> benchmarks.
>
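In rough terms, the patch replaces the per-size-class lock-free stack of TransferBatch objects with one flat array of 4-byte chunk offsets per region, guarded by the region mutex. A minimal sketch of that shape (illustration only, not the compiler-rt code: std::mutex and std::vector stand in for BlockingMutex and the mmap-backed array, and the short-return behavior of GetFromAllocator is simplified):

// Sketch only: each size-class region keeps free-d chunks as compact
// 4-byte offsets in one array, guarded by the region mutex.
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <vector>

struct RegionSketch {
  std::mutex mutex;                  // compiler-rt uses BlockingMutex
  std::vector<uint32_t> free_array;  // free-d chunks as 4-byte offsets

  // Thread-local caches return a batch of chunks in one locked append.
  void ReturnToAllocator(const uint32_t *chunks, size_t n) {
    std::lock_guard<std::mutex> l(mutex);
    free_array.insert(free_array.end(), chunks, chunks + n);
  }

  // Thread-local caches refill by popping from the end of the array.
  size_t GetFromAllocator(uint32_t *chunks, size_t n) {
    std::lock_guard<std::mutex> l(mutex);
    size_t k = n < free_array.size() ? n : free_array.size();
    for (size_t i = 0; i < k; i++) {
      chunks[i] = free_array.back();
      free_array.pop_back();
    }
    return k;  // the real allocator maps fresh chunks instead of returning short
  }
};

The trade-off is the one the log describes: a plain mutex-protected array is simpler, easier to harden, and easier to release back to the OS, at the cost of possible lock contention that the lock-free transfer batches avoided.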
> Modified:
>     compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h
>     compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary64.h
>     compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
>
> Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h
> URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h?rev=279572&r1=279571&r2=279572&view=diff
> ==============================================================================
> --- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h (original)
> +++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_local_cache.h Tue Aug 23 16:19:47 2016
> @@ -26,8 +26,9 @@ struct SizeClassAllocatorLocalCache
> template <class SizeClassAllocator>
> struct SizeClassAllocator64LocalCache {
> typedef SizeClassAllocator Allocator;
> - typedef typename Allocator::TransferBatch TransferBatch;
> static const uptr kNumClasses = SizeClassAllocator::kNumClasses;
> + typedef typename Allocator::SizeClassMapT SizeClassMap;
> + typedef typename Allocator::CompactPtrT CompactPtrT;
>
> void Init(AllocatorGlobalStats *s) {
> stats_.Init();
> @@ -47,9 +48,11 @@ struct SizeClassAllocator64LocalCache {
> stats_.Add(AllocatorStatAllocated,
> Allocator::ClassIdToSize(class_id));
> PerClass *c = &per_class_[class_id];
> if (UNLIKELY(c->count == 0))
> - Refill(allocator, class_id);
> - void *res = c->batch[--c->count];
> - PREFETCH(c->batch[c->count - 1]);
> + Refill(c, allocator, class_id);
> + CHECK_GT(c->count, 0);
> + CompactPtrT chunk = c->chunks[--c->count];
> + void *res = reinterpret_cast<void
> *>(allocator->CompactPtrToPointer(
> + allocator->GetRegionBeginBySizeClass(class_id), chunk));
> return res;
> }
>
> @@ -63,24 +66,26 @@ struct SizeClassAllocator64LocalCache {
> PerClass *c = &per_class_[class_id];
> CHECK_NE(c->max_count, 0UL);
> if (UNLIKELY(c->count == c->max_count))
> - Drain(allocator, class_id);
> - c->batch[c->count++] = p;
> + Drain(c, allocator, class_id, c->max_count / 2);
> + CompactPtrT chunk = allocator->PointerToCompactPtr(
> + allocator->GetRegionBeginBySizeClass(class_id),
> + reinterpret_cast<uptr>(p));
> + c->chunks[c->count++] = chunk;
> }
>
> void Drain(SizeClassAllocator *allocator) {
> for (uptr class_id = 0; class_id < kNumClasses; class_id++) {
> PerClass *c = &per_class_[class_id];
> while (c->count > 0)
> - Drain(allocator, class_id);
> + Drain(c, allocator, class_id, c->count);
> }
> }
>
> // private:
> - typedef typename SizeClassAllocator::SizeClassMapT SizeClassMap;
> struct PerClass {
> - uptr count;
> - uptr max_count;
> - void *batch[2 * TransferBatch::kMaxNumCached];
> + u32 count;
> + u32 max_count;
> + CompactPtrT chunks[2 * SizeClassMap::kMaxNumCachedHint];
> };
> PerClass per_class_[kNumClasses];
> AllocatorStats stats_;
> @@ -90,77 +95,27 @@ struct SizeClassAllocator64LocalCache {
> return;
> for (uptr i = 0; i < kNumClasses; i++) {
> PerClass *c = &per_class_[i];
> - c->max_count = 2 * TransferBatch::MaxCached(i);
> + c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
> }
> }
>
> - // TransferBatch class is declared in SizeClassAllocator.
> - // We transfer chunks between central and thread-local free lists
> in batches.
> - // For small size classes we allocate batches separately.
> - // For large size classes we may use one of the chunks to store
> the batch.
> - // sizeof(TransferBatch) must be a power of 2 for more efficient
> allocation.
> -
> - // If kUseSeparateSizeClassForBatch is true,
> - // all TransferBatch objects are allocated from kBatchClassID
> - // size class (except for those that are needed for kBatchClassID
> itself).
> - // The goal is to have TransferBatches in a totally different
> region of RAM
> - // to improve security and allow more efficient RAM reclamation.
> - // This is experimental and may currently increase memory usage
> by up to 3%
> - // in extreme cases.
> - static const bool kUseSeparateSizeClassForBatch = false;
> -
> - static uptr SizeClassForTransferBatch(uptr class_id) {
> - if (kUseSeparateSizeClassForBatch)
> - return class_id == SizeClassMap::kBatchClassID
> - ? 0
> - : SizeClassMap::kBatchClassID;
> - if (Allocator::ClassIdToSize(class_id) <
> - TransferBatch::AllocationSizeRequiredForNElements(
> - TransferBatch::MaxCached(class_id)))
> - return SizeClassMap::ClassID(sizeof(TransferBatch));
> - return 0;
> - }
> -
> - // Returns a TransferBatch suitable for class_id.
> - // For small size classes allocates the batch from the allocator.
> - // For large size classes simply returns b.
> - TransferBatch *CreateBatch(uptr class_id, SizeClassAllocator
> *allocator,
> - TransferBatch *b) {
> - if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
> - return (TransferBatch*)Allocate(allocator, batch_class_id);
> - return b;
> - }
> -
> - // Destroys TransferBatch b.
> - // For small size classes deallocates b to the allocator.
> - // Does notthing for large size classes.
> - void DestroyBatch(uptr class_id, SizeClassAllocator *allocator,
> - TransferBatch *b) {
> - if (uptr batch_class_id = SizeClassForTransferBatch(class_id))
> - Deallocate(allocator, batch_class_id, b);
> - }
> -
> - NOINLINE void Refill(SizeClassAllocator *allocator, uptr class_id) {
> + NOINLINE void Refill(PerClass *c, SizeClassAllocator *allocator,
> + uptr class_id) {
> InitCache();
> - PerClass *c = &per_class_[class_id];
> - TransferBatch *b = allocator->AllocateBatch(&stats_, this,
> class_id);
> - CHECK_GT(b->Count(), 0);
> - b->CopyToArray(c->batch);
> - c->count = b->Count();
> - DestroyBatch(class_id, allocator, b);
> + uptr num_requested_chunks = SizeClassMap::MaxCachedHint(class_id);
> + allocator->GetFromAllocator(&stats_, class_id, c->chunks,
> + num_requested_chunks);
> + c->count = num_requested_chunks;
> }
>
> - NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
> + NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
> uptr class_id,
> + uptr count) {
> InitCache();
> - PerClass *c = &per_class_[class_id];
> - uptr cnt = Min(c->max_count / 2, c->count);
> - uptr first_idx_to_drain = c->count - cnt;
> - TransferBatch *b = CreateBatch(
> - class_id, allocator, (TransferBatch
> *)c->batch[first_idx_to_drain]);
> - b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
> - &c->batch[first_idx_to_drain], cnt);
> - c->count -= cnt;
> - allocator->DeallocateBatch(&stats_, class_id, b);
> + CHECK_GE(c->count, count);
> + uptr first_idx_to_drain = c->count - count;
> + c->count -= count;
> + allocator->ReturnToAllocator(&stats_, class_id,
> + &c->chunks[first_idx_to_drain],
> count);
> }
> };
>
>
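To make the new cache flow above concrete, here is a compilable toy version of Allocate/Deallocate with Refill and Drain folded in. The backend stub, the class count, and the kMaxCachedHint value are made up for illustration; only the overall shape (refill half a cache in one call, drain the newest half when full, expand 4-byte offsets with a shift) mirrors the patch:

#include <cstddef>
#include <cstdint>

using u32 = uint32_t;
using uptr = uintptr_t;

// Toy backend: hands out fresh compact offsets and accepts returned ones
// (it simply drops them; the real allocator keeps them in the per-region
// free array under the region mutex).
struct ToyBackend {
  uptr region_beg = 0x600000000000ULL;  // pretend region base
  u32 next = 0;
  uptr RegionBeg(size_t) const { return region_beg; }
  void GetFromAllocator(size_t, u32 *chunks, size_t n) {
    for (size_t i = 0; i < n; i++) chunks[i] = next++;
  }
  void ReturnToAllocator(size_t, const u32 *, size_t) {}
};

constexpr size_t kMaxCachedHint = 64;  // illustrative
constexpr size_t kNumClasses = 53;     // illustrative

struct ToyCache {
  struct PerClass {
    u32 count = 0;
    u32 max_count = 2 * kMaxCachedHint;
    u32 chunks[2 * kMaxCachedHint];
  } per_class[kNumClasses];

  void *Allocate(ToyBackend *a, size_t class_id) {
    PerClass *c = &per_class[class_id];
    if (c->count == 0) {  // Refill: grab half a cache worth in one call
      a->GetFromAllocator(class_id, c->chunks, kMaxCachedHint);
      c->count = kMaxCachedHint;
    }
    u32 chunk = c->chunks[--c->count];
    // Expand the 4-byte offset back into a pointer within the class region.
    return reinterpret_cast<void *>(a->RegionBeg(class_id) +
                                    (static_cast<uptr>(chunk) << 4));
  }

  void Deallocate(ToyBackend *a, size_t class_id, void *p) {
    PerClass *c = &per_class[class_id];
    if (c->count == c->max_count) {  // Drain: return the newest half
      u32 n = c->max_count / 2;
      a->ReturnToAllocator(class_id, &c->chunks[c->count - n], n);
      c->count -= n;
    }
    c->chunks[c->count++] = static_cast<u32>(
        (reinterpret_cast<uptr>(p) - a->RegionBeg(class_id)) >> 4);
  }
};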
> Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary64.h
> URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary64.h?rev=279572&r1=279571&r2=279572&view=diff
> ==============================================================================
> --- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary64.h (original)
> +++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary64.h Tue Aug 23 16:19:47 2016
> @@ -30,75 +30,31 @@ template<class SizeClassAllocator> struc
> //
> // UserChunk: a piece of memory returned to user.
> // MetaChunk: kMetadataSize bytes of metadata associated with a
> UserChunk.
> +
> +// FreeArray is an array free-d chunks (stored as 4-byte offsets)
> //
> // A Region looks like this:
> -// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
> +// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1 FreeArray
> template <const uptr kSpaceBeg, const uptr kSpaceSize,
> const uptr kMetadataSize, class SizeClassMap,
> class MapUnmapCallback = NoOpMapUnmapCallback>
> class SizeClassAllocator64 {
> public:
> - struct TransferBatch {
> - static const uptr kMaxNumCached =
> SizeClassMap::kMaxNumCachedHint - 4;
> - void SetFromRange(uptr region_beg, uptr beg_offset, uptr step,
> uptr count) {
> - count_ = count;
> - CHECK_LE(count_, kMaxNumCached);
> - region_beg_ = region_beg;
> - for (uptr i = 0; i < count; i++)
> - batch_[i] = static_cast<u32>((beg_offset + i * step) >> 4);
> - }
> - void SetFromArray(uptr region_beg, void *batch[], uptr count) {
> - count_ = count;
> - CHECK_LE(count_, kMaxNumCached);
> - region_beg_ = region_beg;
> - for (uptr i = 0; i < count; i++)
> - batch_[i] = static_cast<u32>(
> - ((reinterpret_cast<uptr>(batch[i])) - region_beg) >> 4);
> - }
> - void CopyToArray(void *to_batch[]) {
> - for (uptr i = 0, n = Count(); i < n; i++)
> - to_batch[i] = reinterpret_cast<void*>(Get(i));
> - }
> - uptr Count() const { return count_; }
> -
> - // How much memory do we need for a batch containing n elements.
> - static uptr AllocationSizeRequiredForNElements(uptr n) {
> - return sizeof(uptr) * 2 + sizeof(u32) * n;
> - }
> - static uptr MaxCached(uptr class_id) {
> - return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(class_id));
> - }
> -
> - TransferBatch *next;
> -
> - private:
> - uptr Get(uptr i) {
> - return region_beg_ + (static_cast<uptr>(batch_[i]) << 4);
> - }
> - // Instead of storing 64-bit pointers we store 32-bit offsets
> from the
> - // region start divided by 4. This imposes two limitations:
> - // * all allocations are 16-aligned,
> - // * regions are not larger than 2^36.
> - uptr region_beg_ : SANITIZER_WORDSIZE - 10; // Region-beg is
> 4096-aligned.
> - uptr count_ : 10;
> - u32 batch_[kMaxNumCached];
> - };
> - static const uptr kBatchSize = sizeof(TransferBatch);
> - COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
> - COMPILER_CHECK(sizeof(TransferBatch) ==
> - SizeClassMap::kMaxNumCachedHint * sizeof(u32));
> - COMPILER_CHECK(TransferBatch::kMaxNumCached < 1024); // count_
> uses 10 bits.
> -
> - static uptr ClassIdToSize(uptr class_id) {
> - return class_id == SizeClassMap::kBatchClassID
> - ? sizeof(TransferBatch)
> - : SizeClassMap::Size(class_id);
> - }
> -
> typedef SizeClassAllocator64<kSpaceBeg, kSpaceSize, kMetadataSize,
> - SizeClassMap, MapUnmapCallback> ThisT;
> + SizeClassMap, MapUnmapCallback>
> + ThisT;
> typedef SizeClassAllocator64LocalCache<ThisT> AllocatorCache;
>
> + // When we know the size class (the region base) we can represent
> a pointer
> + // as a 4-byte integer (offset from the region start shifted
> right by 4).
> + typedef u32 CompactPtrT;
> + CompactPtrT PointerToCompactPtr(uptr base, uptr ptr) {
> + return static_cast<CompactPtrT>((ptr - base) >> 4);
> + }
> + uptr CompactPtrToPointer(uptr base, CompactPtrT ptr32) {
> + return base + (static_cast<uptr>(ptr32) << 4);
> + }
> +
> void Init() {
> uptr TotalSpaceSize = kSpaceSize + AdditionalSize();
> if (kUsingConstantSpaceBeg) {
> @@ -127,25 +83,40 @@ class SizeClassAllocator64 {
> alignment <= SizeClassMap::kMaxSize;
> }
>
> - NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat,
> AllocatorCache *c,
> - uptr class_id) {
> - CHECK_LT(class_id, kNumClasses);
> + NOINLINE void ReturnToAllocator(AllocatorStats *stat, uptr class_id,
> + const CompactPtrT *chunks, uptr
> n_chunks) {
> RegionInfo *region = GetRegionInfo(class_id);
> - TransferBatch *b = region->free_list.Pop();
> - if (!b)
> - b = PopulateFreeList(stat, c, class_id, region);
> - region->n_allocated += b->Count();
> - return b;
> + uptr region_beg = GetRegionBeginBySizeClass(class_id);
> + CompactPtrT *free_array = GetFreeArray(region_beg);
> +
> + BlockingMutexLock l(&region->mutex);
> + uptr old_num_chunks = region->num_freed_chunks;
> + uptr new_num_freed_chunks = old_num_chunks + n_chunks;
> + EnsureFreeArraySpace(region, region_beg, new_num_freed_chunks);
> + for (uptr i = 0; i < n_chunks; i++)
> + free_array[old_num_chunks + i] = chunks[i];
> + region->num_freed_chunks = new_num_freed_chunks;
> }
>
> - NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
> - TransferBatch *b) {
> + NOINLINE void GetFromAllocator(AllocatorStats *stat, uptr class_id,
> + CompactPtrT *chunks, uptr n_chunks) {
> RegionInfo *region = GetRegionInfo(class_id);
> - CHECK_GT(b->Count(), 0);
> - region->free_list.Push(b);
> - region->n_freed += b->Count();
> + uptr region_beg = GetRegionBeginBySizeClass(class_id);
> + CompactPtrT *free_array = GetFreeArray(region_beg);
> +
> + BlockingMutexLock l(&region->mutex);
> + if (UNLIKELY(region->num_freed_chunks < n_chunks)) {
> + PopulateFreeArray(stat, class_id, region,
> + n_chunks - region->num_freed_chunks);
> + CHECK_GE(region->num_freed_chunks, n_chunks);
> + }
> + region->num_freed_chunks -= n_chunks;
> + uptr base_idx = region->num_freed_chunks;
> + for (uptr i = 0; i < n_chunks; i++)
> + chunks[i] = free_array[base_idx + i];
> }
>
> +
> bool PointerIsMine(const void *p) {
> uptr P = reinterpret_cast<uptr>(p);
> if (kUsingConstantSpaceBeg && (kSpaceBeg % kSpaceSize) == 0)
> @@ -198,8 +169,8 @@ class SizeClassAllocator64 {
> uptr class_id = GetSizeClass(p);
> uptr size = ClassIdToSize(class_id);
> uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
> - return reinterpret_cast<void *>(SpaceBeg() +
> - (kRegionSize * (class_id + 1)) -
> + uptr region_beg = GetRegionBeginBySizeClass(class_id);
> + return reinterpret_cast<void *>(GetMetadataEnd(region_beg) -
> (1 + chunk_idx) * kMetadataSize);
> }
>
> @@ -286,6 +257,10 @@ class SizeClassAllocator64 {
> }
> }
>
> + static uptr ClassIdToSize(uptr class_id) {
> + return SizeClassMap::Size(class_id);
> + }
> +
> static uptr AdditionalSize() {
> return RoundUpTo(sizeof(RegionInfo) * kNumClassesRounded,
> GetPageSizeCached());
> @@ -297,6 +272,11 @@ class SizeClassAllocator64 {
>
> private:
> static const uptr kRegionSize = kSpaceSize / kNumClassesRounded;
> + // FreeArray is the array of free-d chunks (stored as 4-byte
> offsets).
> + // In the worst case it may reguire
> kRegionSize/SizeClassMap::kMinSize
> + // elements, but in reality this will not happen. For simplicity we
> + // dedicate 1/8 of the region's virtual space to FreeArray.
> + static const uptr kFreeArraySize = kRegionSize / 8;
>
> static const bool kUsingConstantSpaceBeg = kSpaceBeg != ~(uptr)0;
> uptr NonConstSpaceBeg;
> @@ -306,16 +286,19 @@ class SizeClassAllocator64 {
> uptr SpaceEnd() const { return SpaceBeg() + kSpaceSize; }
> // kRegionSize must be >= 2^32.
> COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
> - // kRegionSize must be <= 2^36, see TransferBatch.
> + // kRegionSize must be <= 2^36, see CompactPtrT.
> COMPILER_CHECK((kRegionSize) <= (1ULL << (SANITIZER_WORDSIZE / 2
> + 4)));
> // Call mmap for user memory with at least this size.
> static const uptr kUserMapSize = 1 << 16;
> // Call mmap for metadata memory with at least this size.
> static const uptr kMetaMapSize = 1 << 16;
> + // Call mmap for free array memory with at least this size.
> + static const uptr kFreeArrayMapSize = 1 << 12;
>
> struct RegionInfo {
> BlockingMutex mutex;
> - LFStack<TransferBatch> free_list;
> + uptr num_freed_chunks; // Number of elements in the freearray.
> + uptr mapped_free_array; // Bytes mapped for freearray.
> uptr allocated_user; // Bytes allocated for user memory.
> uptr allocated_meta; // Bytes allocated for metadata.
> uptr mapped_user; // Bytes mapped for user memory.
> @@ -331,6 +314,10 @@ class SizeClassAllocator64 {
> return &regions[class_id];
> }
>
> + uptr GetMetadataEnd(uptr region_beg) {
> + return region_beg + kRegionSize - kFreeArraySize;
> + }
> +
> uptr GetChunkIdx(uptr chunk, uptr size) {
> if (!kUsingConstantSpaceBeg)
> chunk -= SpaceBeg();
> @@ -343,18 +330,33 @@ class SizeClassAllocator64 {
> return (u32)offset / (u32)size;
> }
>
> - NOINLINE TransferBatch *PopulateFreeList(AllocatorStats *stat,
> - AllocatorCache *c, uptr
> class_id,
> - RegionInfo *region) {
> - BlockingMutexLock l(&region->mutex);
> - TransferBatch *b = region->free_list.Pop();
> - if (b)
> - return b;
> + CompactPtrT *GetFreeArray(uptr region_beg) {
> + return reinterpret_cast<CompactPtrT *>(region_beg + kRegionSize -
> + kFreeArraySize);
> + }
> +
> + void EnsureFreeArraySpace(RegionInfo *region, uptr region_beg,
> + uptr num_freed_chunks) {
> + uptr needed_space = num_freed_chunks * sizeof(CompactPtrT);
> + if (region->mapped_free_array < needed_space) {
> + CHECK_LE(needed_space, kFreeArraySize);
> + uptr new_mapped_free_array = RoundUpTo(needed_space,
> kFreeArrayMapSize);
> + uptr current_map_end =
> reinterpret_cast<uptr>(GetFreeArray(region_beg)) +
> region->mapped_free_array;
> + uptr new_map_size = new_mapped_free_array -
> region->mapped_free_array;
> + MapWithCallback(current_map_end, new_map_size);
> + region->mapped_free_array = new_mapped_free_array;
> + }
> + }
> +
> +
> + NOINLINE void PopulateFreeArray(AllocatorStats *stat, uptr class_id,
> + RegionInfo *region, uptr
> requested_count) {
> + // region->mutex is held.
> uptr size = ClassIdToSize(class_id);
> - uptr count = TransferBatch::MaxCached(class_id);
> uptr beg_idx = region->allocated_user;
> - uptr end_idx = beg_idx + count * size;
> - uptr region_beg = SpaceBeg() + kRegionSize * class_id;
> + uptr end_idx = beg_idx + requested_count * size;
> + uptr region_beg = GetRegionBeginBySizeClass(class_id);
> if (end_idx + size > region->mapped_user) {
> // Do the mmap for the user memory.
> uptr map_size = kUserMapSize;
> @@ -365,8 +367,19 @@ class SizeClassAllocator64 {
> stat->Add(AllocatorStatMapped, map_size);
> region->mapped_user += map_size;
> }
> - uptr total_count = (region->mapped_user - beg_idx - size)
> - / size / count * count;
> + CompactPtrT *free_array = GetFreeArray(region_beg);
> + uptr total_count = (region->mapped_user - beg_idx) / size;
> + uptr num_freed_chunks = region->num_freed_chunks;
> + EnsureFreeArraySpace(region, region_beg, num_freed_chunks +
> total_count);
> + for (uptr i = 0; i < total_count; i++) {
> + uptr chunk = beg_idx + i * size;
> + free_array[num_freed_chunks + total_count - 1 - i] =
> + PointerToCompactPtr(0, chunk);
> + }
> + region->num_freed_chunks += total_count;
> + region->allocated_user += total_count * size;
> + CHECK_LE(region->allocated_user, region->mapped_user);
> +
> region->allocated_meta += total_count * kMetadataSize;
> if (region->allocated_meta > region->mapped_meta) {
> uptr map_size = kMetaMapSize;
> @@ -374,7 +387,7 @@ class SizeClassAllocator64 {
> map_size += kMetaMapSize;
> // Do the mmap for the metadata.
> CHECK_GE(region->mapped_meta + map_size, region->allocated_meta);
> - MapWithCallback(region_beg + kRegionSize -
> + MapWithCallback(GetMetadataEnd(region_beg) -
> region->mapped_meta - map_size, map_size);
> region->mapped_meta += map_size;
> }
> @@ -385,19 +398,6 @@ class SizeClassAllocator64 {
> kRegionSize / 1024 / 1024, size);
> Die();
> }
> - for (;;) {
> - b = c->CreateBatch(class_id, this,
> - (TransferBatch *)(region_beg + beg_idx));
> - b->SetFromRange(region_beg, beg_idx, size, count);
> - region->allocated_user += count * size;
> - CHECK_LE(region->allocated_user, region->mapped_user);
> - beg_idx += count * size;
> - if (beg_idx + count * size + size > region->mapped_user)
> - break;
> - CHECK_GT(b->Count(), 0);
> - region->free_list.Push(b);
> - }
> - return b;
> }
> };
>
>
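The CompactPtrT scheme above relies on two invariants stated in the patch: chunks are 16-byte aligned and a region is at most 2^36 bytes, so (offset from region start) >> 4 always fits in 32 bits. A small self-contained example of the round trip, assuming a 64-bit build (the region base value here is made up):

#include <cassert>
#include <cstdint>

using u32 = uint32_t;
using uptr = uintptr_t;

u32 PointerToCompactPtr(uptr base, uptr ptr) {
  return static_cast<u32>((ptr - base) >> 4);
}

uptr CompactPtrToPointer(uptr base, u32 ptr32) {
  return base + (static_cast<uptr>(ptr32) << 4);
}

int main() {
  const uptr region_beg = 0x600000000000ULL;  // hypothetical region base
  const uptr chunk = region_beg + 0x123450;   // some 16-byte-aligned chunk
  u32 compact = PointerToCompactPtr(region_beg, chunk);
  assert(CompactPtrToPointer(region_beg, compact) == chunk);
  // Far end of a 2^36-byte region: (2^36 - 16) >> 4 == 2^32 - 1 still fits.
  assert(PointerToCompactPtr(0, (1ULL << 36) - 16) == 0xffffffffu);
  return 0;
}

The 1/8-of-region reservation for the free array is likewise a practical rather than worst-case bound, as the comment in the patch itself notes.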
> Modified: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
> URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc?rev=279572&r1=279571&r2=279572&view=diff
> ==============================================================================
> --- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc (original)
> +++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc Tue Aug 23 16:19:47 2016
> @@ -99,8 +99,10 @@ void TestSizeClassAllocator() {
> memset(&cache, 0, sizeof(cache));
> cache.Init(0);
>
> - static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
> - 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
> + static const uptr sizes[] = {
> + 1, 16, 30, 40, 100, 1000, 10000,
> + 50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000
> + };
>
> std::vector<void *> allocated;
>
> @@ -300,8 +302,11 @@ TEST(SanitizerCommon, SizeClassAllocator
> cache.Init(0);
> AllocatorStats stats;
> stats.Init();
> - a->AllocateBatch(&stats, &cache, 32);
> - EXPECT_EQ(TestMapUnmapCallback::map_count, 3); // State + alloc
> + metadata.
> + const size_t kNumChunks = 128;
> + uint32_t chunks[kNumChunks];
> + a->GetFromAllocator(&stats, 32, chunks, kNumChunks);
> + // State + alloc + metadata + freearray.
> + EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
> a->TestOnlyUnmap();
> EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1); // The whole thing.
> delete a;
> @@ -360,8 +365,10 @@ void FailInAssertionOnOOM() {
> cache.Init(0);
> AllocatorStats stats;
> stats.Init();
> + const size_t kNumChunks = 128;
> + uint32_t chunks[kNumChunks];
> for (int i = 0; i < 1000000; i++) {
> - a.AllocateBatch(&stats, &cache, 52);
> + a.GetFromAllocator(&stats, 52, chunks, kNumChunks);
> }
>
> a.TestOnlyUnmap();
>

--

-Bill Seurer

_______________________________________________
llvm-commits mailing list
llvm-commits@lists.llvm.org
http://lists.llvm.org/cgi-bin/mailman/listinfo/llvm-commits