[compiler-rt] r326007 - [Sanitizers] Increase allocated chunk limit for LargeMmapAllocator
Alex Shlyapnikov via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 23 17:00:28 PST 2018
Author: alekseyshl
Date: Fri Feb 23 17:00:27 2018
New Revision: 326007
URL: http://llvm.org/viewvc/llvm-project?rev=326007&view=rev
Log:
[Sanitizers] Increase allocated chunk limit for LargeMmapAllocator
Summary:
There are applications out there which allocate more than 1 << 18 large
chunks of memory (the ones handled by LargeMmapAllocator, a.k.a. the
secondary allocator). On 64 bits, the secondary allocator now stores
allocated chunks in a region of memory that grows on demand in 128K
blocks (16K chunk pointers at a time), up to 1 << 20 chunks total.
The sanitizer internal allocator's secondary uses a fixed-size array
storing up to 1 << 15 chunks, shrinking the memory used for that array
from 2Mb down to 256K. Nothing changes for 32 bits: chunks are still
stored in a fixed-size array (up to 1 << 15 chunks).
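
As a quick sanity check on those figures, here is a sketch of the
arithmetic, assuming 8-byte pointers (the 64-bit case; uint64_t stands
in for the sanitizer's uptr, and the asserts hold on 64 bits only):

  // Sketch only: the size math behind the summary, for 8-byte pointers.
  #include <cstdint>

  static_assert((1ULL << 15) * sizeof(uint64_t) == 256 * 1024,
                "static array: 32K chunk pointers occupy 256K");
  static_assert((1ULL << 18) * sizeof(uint64_t) == 2 * 1024 * 1024,
                "old 64-bit limit: 256K chunk pointers occupied 2Mb");
  static_assert((1ULL << 20) * sizeof(uint64_t) == 8 * 1024 * 1024,
                "new 64-bit limit: 1M chunk pointers reserve 8Mb of VA");
  static_assert((1ULL << 14) * sizeof(uint64_t) == 128 * 1024,
                "the dynamic array is committed in 128K blocks");
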
Reviewers: eugenis
Subscribers: kubamracek, delcypher, #sanitizers, llvm-commits
Differential Revision: https://reviews.llvm.org/D43693
Modified:
compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h
compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h?rev=326007&r1=326006&r2=326007&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_internal.h Fri Feb 23 17:00:27 2018
@@ -46,9 +46,12 @@ typedef SizeClassAllocator32<AP32> Prima
 typedef SizeClassAllocatorLocalCache<PrimaryInternalAllocator>
     InternalAllocatorCache;

+typedef LargeMmapAllocator<NoOpMapUnmapCallback,
+                           LargeMmapAllocatorPtrArrayStatic>
+    SecondaryInternalAllocator;
+
 typedef CombinedAllocator<PrimaryInternalAllocator, InternalAllocatorCache,
-                          LargeMmapAllocator<NoOpMapUnmapCallback>
-                          > InternalAllocator;
+                          SecondaryInternalAllocator> InternalAllocator;

 void *InternalAlloc(uptr size, InternalAllocatorCache *cache = nullptr,
                     uptr alignment = 0);
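
To make the shape of this change concrete, here is a toy, standalone
sketch of the policy-by-template pattern the patch adopts (every Toy*
and *Policy name is made up; only the idea of a word-size-dependent
default mirrors DefaultLargeMmapAllocatorPtrArray in the next file):

  // Toy sketch, not sanitizer code: chunk-pointer storage becomes a
  // swappable template parameter with a word-size-dependent default.
  #include <cstdint>

  struct StaticArrayPolicy {};   // stands in for ...PtrArrayStatic
  struct DynamicArrayPolicy {};  // stands in for ...PtrArrayDynamic

  #if UINTPTR_MAX == 0xFFFFFFFF  // 32 bits: small fixed footprint
  typedef StaticArrayPolicy DefaultPtrArray;
  #else                          // 64 bits: grow-on-demand storage
  typedef DynamicArrayPolicy DefaultPtrArray;
  #endif

  template <class PtrArrayT = DefaultPtrArray>
  class ToyLargeAllocator {
    PtrArrayT ptr_array_;  // would own the storage behind chunks_
  };

  ToyLargeAllocator<> regular;                  // picks the default
  ToyLargeAllocator<StaticArrayPolicy> frugal;  // explicit static policy

The internal allocator's secondary (hunk above) pins the static policy
explicitly, trading chunk capacity (32K max) for a small fixed
footprint, while 64-bit users of the default get the dynamic array.
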
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h?rev=326007&r1=326006&r2=326007&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_secondary.h Fri Feb 23 17:00:27 2018
@@ -14,14 +14,66 @@
 #error This file must be included inside sanitizer_allocator.h
 #endif

+// Fixed array to store LargeMmapAllocator chunks list, limited to 32K total
+// allocated chunks. To be used in memory-constrained or not memory-hungry
+// cases (currently, 32 bits and the internal allocator).
+class LargeMmapAllocatorPtrArrayStatic {
+ public:
+  INLINE void *Init() { return &p_[0]; }
+  INLINE void EnsureSpace(uptr n) { CHECK_LT(n, kMaxNumChunks); }
+ private:
+  static const int kMaxNumChunks = 1 << 15;
+  uptr p_[kMaxNumChunks];
+};
+
+// Much less restricted LargeMmapAllocator chunks list (compared to
+// PtrArrayStatic). Backed by an mmaped memory region and can hold up to 1M
+// chunks. ReservedAddressRange is used instead of just MAP_NORESERVE to get
+// the same functionality on Fuchsia, which does not support MAP_NORESERVE.
+class LargeMmapAllocatorPtrArrayDynamic {
+ public:
+  INLINE void *Init() {
+    uptr p = address_range_.Init(kMaxNumChunks * sizeof(uptr),
+                                 "sanitizer_large_allocator");
+    CHECK(p);
+    return reinterpret_cast<void*>(p);
+  }
+
+  INLINE void EnsureSpace(uptr n) {
+    CHECK_LT(n, kMaxNumChunks);
+    DCHECK(n <= n_reserved_);
+    if (UNLIKELY(n == n_reserved_)) {
+      address_range_.MapOrDie(
+          reinterpret_cast<uptr>(address_range_.base()) +
+              n_reserved_ * sizeof(uptr),
+          kChunksBlockCount * sizeof(uptr));
+      n_reserved_ += kChunksBlockCount;
+    }
+  }
+
+ private:
+  static const int kMaxNumChunks = 1 << 20;
+  static const int kChunksBlockCount = 1 << 14;
+  ReservedAddressRange address_range_;
+  uptr n_reserved_;
+};
+
+#if SANITIZER_WORDSIZE == 32
+typedef LargeMmapAllocatorPtrArrayStatic DefaultLargeMmapAllocatorPtrArray;
+#else
+typedef LargeMmapAllocatorPtrArrayDynamic DefaultLargeMmapAllocatorPtrArray;
+#endif
+
 // This class can (de)allocate only large chunks of memory using mmap/unmap.
 // The main purpose of this allocator is to cover large and rare allocation
 // sizes not covered by more efficient allocators (e.g. SizeClassAllocator64).
-template <class MapUnmapCallback = NoOpMapUnmapCallback>
+template <class MapUnmapCallback = NoOpMapUnmapCallback,
+          class PtrArrayT = DefaultLargeMmapAllocatorPtrArray>
 class LargeMmapAllocator {
  public:
   void InitLinkerInitialized() {
     page_size_ = GetPageSizeCached();
+    chunks_ = reinterpret_cast<Header**>(ptr_array_.Init());
   }

   void Init() {
@@ -63,11 +115,11 @@ class LargeMmapAllocator {
     CHECK_LT(size_log, ARRAY_SIZE(stats.by_size_log));
     {
       SpinMutexLock l(&mutex_);
+      ptr_array_.EnsureSpace(n_chunks_);
       uptr idx = n_chunks_++;
-      chunks_sorted_ = false;
-      CHECK_LT(idx, kMaxNumChunks);
       h->chunk_idx = idx;
       chunks_[idx] = h;
+      chunks_sorted_ = false;
       stats.n_allocs++;
       stats.currently_allocated += map_size;
       stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
@@ -85,9 +137,8 @@ class LargeMmapAllocator {
       uptr idx = h->chunk_idx;
       CHECK_EQ(chunks_[idx], h);
       CHECK_LT(idx, n_chunks_);
-      chunks_[idx] = chunks_[n_chunks_ - 1];
+      chunks_[idx] = chunks_[--n_chunks_];
       chunks_[idx]->chunk_idx = idx;
-      n_chunks_--;
       chunks_sorted_ = false;
       stats.n_frees++;
       stats.currently_allocated -= h->map_size;
@@ -223,7 +274,7 @@ class LargeMmapAllocator {
     EnsureSortedChunks();  // Avoid doing the sort while iterating.
     for (uptr i = 0; i < n_chunks_; i++) {
       auto t = chunks_[i];
-      callback(reinterpret_cast<uptr>(GetUser(chunks_[i])), arg);
+      callback(reinterpret_cast<uptr>(GetUser(t)), arg);
       // Consistency check: verify that the array did not change.
       CHECK_EQ(chunks_[i], t);
       CHECK_EQ(chunks_[i]->chunk_idx, i);
@@ -231,7 +282,6 @@ class LargeMmapAllocator {
   }

  private:
-  static const int kMaxNumChunks = 1 << FIRST_32_SECOND_64(15, 18);
   struct Header {
     uptr map_beg;
     uptr map_size;
@@ -257,7 +307,8 @@ class LargeMmapAllocator {
   }

   uptr page_size_;
-  Header *chunks_[kMaxNumChunks];
+  Header **chunks_;
+  PtrArrayT ptr_array_;
   uptr n_chunks_;
   bool chunks_sorted_;
   struct Stats {
@@ -266,4 +317,3 @@ class LargeMmapAllocator {
   } stats;
   SpinMutex mutex_;
 };
-
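
For readers outside the sanitizer tree, here is a minimal standalone
sketch of the reserve-then-commit scheme LargeMmapAllocatorPtrArrayDynamic
implements above. It uses raw mmap/mprotect with MAP_NORESERVE (Linux)
purely for illustration; the real code goes through ReservedAddressRange
precisely so the same logic also works on Fuchsia, which lacks
MAP_NORESERVE. GrowingPtrArray and all its members are hypothetical names.

  // Hypothetical standalone analogue of LargeMmapAllocatorPtrArrayDynamic:
  // reserve the worst-case virtual range once, then commit it block by
  // block as the chunk count grows.
  #include <sys/mman.h>
  #include <cassert>
  #include <cstddef>
  #include <cstdint>

  class GrowingPtrArray {
   public:
    static const size_t kMaxNumChunks = 1 << 20;      // 1M chunk pointers
    static const size_t kChunksBlockCount = 1 << 14;  // commit 128K at a time

    void Init() {
      // Reserve address space only; PROT_NONE pages consume no memory yet.
      void *p = mmap(nullptr, kMaxNumChunks * sizeof(uintptr_t), PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      assert(p != MAP_FAILED);
      p_ = static_cast<uintptr_t *>(p);
      n_reserved_ = 0;
    }

    // Commit the next block when index n reaches the committed edge.
    void EnsureSpace(size_t n) {
      assert(n < kMaxNumChunks);
      assert(n <= n_reserved_);
      if (n == n_reserved_) {
        int res = mprotect(p_ + n_reserved_,
                           kChunksBlockCount * sizeof(uintptr_t),
                           PROT_READ | PROT_WRITE);
        assert(res == 0);
        (void)res;  // silence unused-variable warning under NDEBUG
        n_reserved_ += kChunksBlockCount;
      }
    }

    uintptr_t *data() { return p_; }

   private:
    uintptr_t *p_;
    size_t n_reserved_;
  };

As in the patched Allocate() path, EnsureSpace(n) is meant to be called
under the allocator lock right before slot n is written, so the committed
region always stays ahead of n_chunks_.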