[compiler-rt] 19ea1d4 - [scudo][standalone] Add a free list to the Secondary

Kostya Kortchinsky via llvm-commits llvm-commits at lists.llvm.org
Wed Oct 30 09:06:41 PDT 2019


Author: Kostya Kortchinsky
Date: 2019-10-30T08:55:58-07:00
New Revision: 19ea1d46ccfe7decd623ba3f860d8ba7a9f1bb44

URL: https://github.com/llvm/llvm-project/commit/19ea1d46ccfe7decd623ba3f860d8ba7a9f1bb44
DIFF: https://github.com/llvm/llvm-project/commit/19ea1d46ccfe7decd623ba3f860d8ba7a9f1bb44.diff

LOG: [scudo][standalone] Add a free list to the Secondary

Summary:
The secondary allocator is slow because we map and unmap each block
on allocation and deallocation.

While I really like the security benefits of that behavior, it yields
very disappointing performance numbers on Android for larger
allocation benchmarks.

So this change adds a free list to the Secondary that holds recently
deallocated chunks and (currently) releases the extraneous memory back
to the OS. This allows us to save some memory mapping operations on
allocation and deallocation. I do not think this lowers the security
of the Secondary, but it can increase the memory footprint a little
bit (RSS & VA).
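
In outline, deallocation now pushes the block onto a size-sorted free
list (releasing its pages back to the OS) instead of unmapping it, and
allocation scans that list for a close fit before mapping fresh memory.
An abridged sketch of the allocation-side lookup (the helper name is
mine; the real logic lives inline in allocate() in the secondary.h diff
below, including the same 4-page slack bound):

  // Return a cached block of at least RoundedSize bytes, or nullptr so
  // the caller falls back to map(). FreeBlocks is kept sorted by
  // committed size; bookkeeping (InUseBlocks, stats) is omitted here.
  void *allocateFromFreeList(uptr RoundedSize) {
    for (auto &H : FreeBlocks) {
      const uptr BlockSize = H.BlockEnd - reinterpret_cast<uptr>(&H);
      if (BlockSize < RoundedSize)
        continue; // Too small, keep scanning upward.
      if (BlockSize > RoundedSize + 4 * getPageSizeCached())
        break; // A fit would waste more than 4 pages, map anew instead.
      FreeBlocks.remove(&H);
      return reinterpret_cast<void *>(reinterpret_cast<uptr>(&H) +
                                      LargeBlock::getHeaderSize());
    }
    return nullptr;
  }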

The maximum number of blocks the free list can hold is a template
parameter, `0U` meaning that we fall back to the old behavior. The
higher that number, the higher the potential extra memory footprint.
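
For illustration, a config now picks its Secondary like so
(ExampleConfig and the 16U cap are hypothetical; the real platform
configs are in the allocator_config.h diff below):

  struct ExampleConfig {
    typedef SizeClassAllocator64<DefaultSizeClassMap, 30U> Primary;
    // Cache at most 16 freed blocks; MapAllocator<> defaults to 32U,
    // and MapAllocator<0U> keeps the old map/unmap-per-block behavior.
    typedef MapAllocator<16U> Secondary;
    template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 2U>;
  };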

I added default configurations for all our platforms, but they are
likely to change in the near future based on needs and feedback.

Reviewers: hctim, morehouse, cferris, pcc, eugenis, vitalybuka

Subscribers: mgorny, #sanitizers, llvm-commits

Tags: #sanitizers, #llvm

Differential Revision: https://reviews.llvm.org/D69570

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/CMakeLists.txt
    compiler-rt/lib/scudo/standalone/allocator_config.h
    compiler-rt/lib/scudo/standalone/combined.h
    compiler-rt/lib/scudo/standalone/secondary.h
    compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
    compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp

Removed: 
    compiler-rt/lib/scudo/standalone/secondary.cpp


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/CMakeLists.txt b/compiler-rt/lib/scudo/standalone/CMakeLists.txt
index b793b42461d2..594cf78ea6f5 100644
--- a/compiler-rt/lib/scudo/standalone/CMakeLists.txt
+++ b/compiler-rt/lib/scudo/standalone/CMakeLists.txt
@@ -77,7 +77,6 @@ set(SCUDO_SOURCES
   fuchsia.cpp
   linux.cpp
   report.cpp
-  secondary.cpp
   string_utils.cpp
   )
 

diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.h b/compiler-rt/lib/scudo/standalone/allocator_config.h
index 62c6f2875106..166e19e2b8f2 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -14,6 +14,7 @@
 #include "flags.h"
 #include "primary32.h"
 #include "primary64.h"
+#include "secondary.h"
 #include "size_class_map.h"
 #include "tsd_exclusive.h"
 #include "tsd_shared.h"
@@ -31,6 +32,7 @@ struct DefaultConfig {
   // 512KB regions
   typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
 #endif
+  typedef MapAllocator<> Secondary;
   template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
 };
 
@@ -43,6 +45,7 @@ struct AndroidConfig {
   // 512KB regions
   typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
 #endif
+  typedef MapAllocator<> Secondary;
   template <class A>
   using TSDRegistryT = TSDRegistrySharedT<A, 2U>; // Shared, max 2 TSDs.
 };
@@ -56,6 +59,7 @@ struct AndroidSvelteConfig {
   // 64KB regions
   typedef SizeClassAllocator32<SizeClassMap, 16U> Primary;
 #endif
+  typedef MapAllocator<0U> Secondary;
   template <class A>
   using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD.
 };
@@ -63,6 +67,7 @@ struct AndroidSvelteConfig {
 struct FuchsiaConfig {
   // 1GB Regions
   typedef SizeClassAllocator64<DefaultSizeClassMap, 30U> Primary;
+  typedef MapAllocator<> Secondary;
   template <class A>
   using TSDRegistryT = TSDRegistrySharedT<A, 8U>; // Shared, max 8 TSDs.
 };

diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 60be1dd20d39..dc9c8be342a5 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -449,7 +449,7 @@ template <class Params> class Allocator {
   }
 
 private:
-  typedef MapAllocator SecondaryT;
+  using SecondaryT = typename Params::Secondary;
   typedef typename PrimaryT::SizeClassMap SizeClassMap;
 
   static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;

diff --git a/compiler-rt/lib/scudo/standalone/secondary.cpp b/compiler-rt/lib/scudo/standalone/secondary.cpp
deleted file mode 100644
index dbf15892836c..000000000000
--- a/compiler-rt/lib/scudo/standalone/secondary.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-//===-- secondary.cpp -------------------------------------------*- C++ -*-===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#include "secondary.h"
-
-#include "string_utils.h"
-
-namespace scudo {
-
-// As with the Primary, the size passed to this function includes any desired
-// alignment, so that the frontend can align the user allocation. The hint
-// parameter allows us to unmap spurious memory when dealing with larger
-// (greater than a page) alignments on 32-bit platforms.
-// Due to the sparsity of address space available on those platforms, requesting
-// an allocation from the Secondary with a large alignment would end up wasting
-// VA space (even though we are not committing the whole thing), hence the need
-// to trim off some of the reserved space.
-// For allocations requested with an alignment greater than or equal to a page,
-// the committed memory will amount to something close to Size - AlignmentHint
-// (pending rounding and headers).
-void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
-  DCHECK_GT(Size, AlignmentHint);
-  const uptr PageSize = getPageSizeCached();
-  const uptr MapSize =
-      roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize) + 2 * PageSize;
-  MapPlatformData Data = {};
-  uptr MapBase =
-      reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
-                                 MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
-  if (UNLIKELY(!MapBase))
-    return nullptr;
-  uptr CommitBase = MapBase + PageSize;
-  uptr MapEnd = MapBase + MapSize;
-
-  // In the unlikely event of alignments larger than a page, adjust the amount
-  // of memory we want to commit, and trim the extra memory.
-  if (UNLIKELY(AlignmentHint >= PageSize)) {
-    // For alignments greater than or equal to a page, the user pointer (e.g.,
-    // the pointer that is returned by the C or C++ allocation APIs) ends up on
-    // a page boundary, and our headers will live in the preceding page.
-    CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
-    const uptr NewMapBase = CommitBase - PageSize;
-    DCHECK_GE(NewMapBase, MapBase);
-    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
-    // are less constrained memory-wise, and that saves us two syscalls.
-    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
-      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
-      MapBase = NewMapBase;
-    }
-    const uptr NewMapEnd = CommitBase + PageSize +
-                           roundUpTo((Size - AlignmentHint), PageSize) +
-                           PageSize;
-    DCHECK_LE(NewMapEnd, MapEnd);
-    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
-      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
-      MapEnd = NewMapEnd;
-    }
-  }
-
-  const uptr CommitSize = MapEnd - PageSize - CommitBase;
-  const uptr Ptr =
-      reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
-                                 CommitSize, "scudo:secondary", 0, &Data));
-  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
-  H->MapBase = MapBase;
-  H->MapSize = MapEnd - MapBase;
-  H->BlockEnd = CommitBase + CommitSize;
-  H->Data = Data;
-  {
-    ScopedLock L(Mutex);
-    InUseBlocks.push_back(H);
-    AllocatedBytes += CommitSize;
-    if (LargestSize < CommitSize)
-      LargestSize = CommitSize;
-    NumberOfAllocs++;
-    Stats.add(StatAllocated, CommitSize);
-    Stats.add(StatMapped, H->MapSize);
-  }
-  if (BlockEnd)
-    *BlockEnd = CommitBase + CommitSize;
-  return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
-}
-
-void MapAllocator::deallocate(void *Ptr) {
-  LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
-  {
-    ScopedLock L(Mutex);
-    InUseBlocks.remove(H);
-    const uptr CommitSize = H->BlockEnd - reinterpret_cast<uptr>(H);
-    FreedBytes += CommitSize;
-    NumberOfFrees++;
-    Stats.sub(StatAllocated, CommitSize);
-    Stats.sub(StatMapped, H->MapSize);
-  }
-  void *Addr = reinterpret_cast<void *>(H->MapBase);
-  const uptr Size = H->MapSize;
-  MapPlatformData Data;
-  Data = H->Data;
-  unmap(Addr, Size, UNMAP_ALL, &Data);
-}
-
-void MapAllocator::getStats(ScopedString *Str) const {
-  Str->append(
-      "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
-      "(%zuK), remains %zu (%zuK) max %zuM\n",
-      NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
-      NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
-      LargestSize >> 20);
-}
-
-} // namespace scudo

diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index b6b41377e263..38e2d958dddc 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -48,7 +48,7 @@ static Header *getHeader(const void *Ptr) {
 
 } // namespace LargeBlock
 
-class MapAllocator {
+template <uptr MaxFreeListSize = 32U> class MapAllocator {
 public:
   void initLinkerInitialized(GlobalStats *S) {
     Stats.initLinkerInitialized();
@@ -83,9 +83,13 @@ class MapAllocator {
       Callback(reinterpret_cast<uptr>(&H) + LargeBlock::getHeaderSize());
   }
 
+  static uptr getMaxFreeListSize(void) { return MaxFreeListSize; }
+
 private:
   HybridMutex Mutex;
   DoublyLinkedList<LargeBlock::Header> InUseBlocks;
+  // The free list is sorted based on the committed size of blocks.
+  DoublyLinkedList<LargeBlock::Header> FreeBlocks;
   uptr AllocatedBytes;
   uptr FreedBytes;
   uptr LargestSize;
@@ -94,6 +98,154 @@ class MapAllocator {
   LocalStats Stats;
 };
 
+// As with the Primary, the size passed to this function includes any desired
+// alignment, so that the frontend can align the user allocation. The hint
+// parameter allows us to unmap spurious memory when dealing with larger
+// (greater than a page) alignments on 32-bit platforms.
+// Due to the sparsity of address space available on those platforms, requesting
+// an allocation from the Secondary with a large alignment would end up wasting
+// VA space (even though we are not committing the whole thing), hence the need
+// to trim off some of the reserved space.
+// For allocations requested with an alignment greater than or equal to a page,
+// the committed memory will amount to something close to Size - AlignmentHint
+// (pending rounding and headers).
+template <uptr MaxFreeListSize>
+void *MapAllocator<MaxFreeListSize>::allocate(uptr Size, uptr AlignmentHint,
+                                              uptr *BlockEnd) {
+  DCHECK_GT(Size, AlignmentHint);
+  const uptr PageSize = getPageSizeCached();
+  const uptr RoundedSize =
+      roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize);
+
+  if (MaxFreeListSize && AlignmentHint < PageSize) {
+    ScopedLock L(Mutex);
+    for (auto &H : FreeBlocks) {
+      const uptr FreeBlockSize = H.BlockEnd - reinterpret_cast<uptr>(&H);
+      if (FreeBlockSize < RoundedSize)
+        continue;
+      // Candidate free block should only be at most 4 pages larger.
+      if (FreeBlockSize > RoundedSize + 4 * PageSize)
+        break;
+      FreeBlocks.remove(&H);
+      InUseBlocks.push_back(&H);
+      AllocatedBytes += FreeBlockSize;
+      NumberOfAllocs++;
+      Stats.add(StatAllocated, FreeBlockSize);
+      if (BlockEnd)
+        *BlockEnd = H.BlockEnd;
+      return reinterpret_cast<void *>(reinterpret_cast<uptr>(&H) +
+                                      LargeBlock::getHeaderSize());
+    }
+  }
+
+  MapPlatformData Data = {};
+  const uptr MapSize = RoundedSize + 2 * PageSize;
+  uptr MapBase =
+      reinterpret_cast<uptr>(map(nullptr, MapSize, "scudo:secondary",
+                                 MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
+  if (UNLIKELY(!MapBase))
+    return nullptr;
+  uptr CommitBase = MapBase + PageSize;
+  uptr MapEnd = MapBase + MapSize;
+
+  // In the unlikely event of alignments larger than a page, adjust the amount
+  // of memory we want to commit, and trim the extra memory.
+  if (UNLIKELY(AlignmentHint >= PageSize)) {
+    // For alignments greater than or equal to a page, the user pointer (e.g.,
+    // the pointer that is returned by the C or C++ allocation APIs) ends up on
+    // a page boundary, and our headers will live in the preceding page.
+    CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
+    const uptr NewMapBase = CommitBase - PageSize;
+    DCHECK_GE(NewMapBase, MapBase);
+    // We only trim the extra memory on 32-bit platforms: 64-bit platforms
+    // are less constrained memory-wise, and that saves us two syscalls.
+    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
+      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
+      MapBase = NewMapBase;
+    }
+    const uptr NewMapEnd = CommitBase + PageSize +
+                           roundUpTo((Size - AlignmentHint), PageSize) +
+                           PageSize;
+    DCHECK_LE(NewMapEnd, MapEnd);
+    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
+      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
+      MapEnd = NewMapEnd;
+    }
+  }
+
+  const uptr CommitSize = MapEnd - PageSize - CommitBase;
+  const uptr Ptr =
+      reinterpret_cast<uptr>(map(reinterpret_cast<void *>(CommitBase),
+                                 CommitSize, "scudo:secondary", 0, &Data));
+  LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
+  H->MapBase = MapBase;
+  H->MapSize = MapEnd - MapBase;
+  H->BlockEnd = CommitBase + CommitSize;
+  H->Data = Data;
+  {
+    ScopedLock L(Mutex);
+    InUseBlocks.push_back(H);
+    AllocatedBytes += CommitSize;
+    if (LargestSize < CommitSize)
+      LargestSize = CommitSize;
+    NumberOfAllocs++;
+    Stats.add(StatAllocated, CommitSize);
+    Stats.add(StatMapped, H->MapSize);
+  }
+  if (BlockEnd)
+    *BlockEnd = CommitBase + CommitSize;
+  return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
+}
+
+template <uptr MaxFreeListSize>
+void MapAllocator<MaxFreeListSize>::deallocate(void *Ptr) {
+  LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
+  {
+    ScopedLock L(Mutex);
+    InUseBlocks.remove(H);
+    const uptr CommitSize = H->BlockEnd - reinterpret_cast<uptr>(H);
+    FreedBytes += CommitSize;
+    NumberOfFrees++;
+    Stats.sub(StatAllocated, CommitSize);
+    if (MaxFreeListSize && FreeBlocks.size() < MaxFreeListSize) {
+      bool Inserted = false;
+      for (auto &F : FreeBlocks) {
+        const uptr FreeBlockSize = F.BlockEnd - reinterpret_cast<uptr>(&F);
+        if (FreeBlockSize >= CommitSize) {
+          FreeBlocks.insert(H, &F);
+          Inserted = true;
+          break;
+        }
+      }
+      if (!Inserted)
+        FreeBlocks.push_back(H);
+      const uptr RoundedAllocationStart =
+          roundUpTo(reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize(),
+                    getPageSizeCached());
+      MapPlatformData Data = H->Data;
+      // TODO(kostyak): use release_to_os_interval_ms
+      releasePagesToOS(H->MapBase, RoundedAllocationStart - H->MapBase,
+                       H->BlockEnd - RoundedAllocationStart, &Data);
+      return;
+    }
+    Stats.sub(StatMapped, H->MapSize);
+  }
+  void *Addr = reinterpret_cast<void *>(H->MapBase);
+  const uptr Size = H->MapSize;
+  MapPlatformData Data = H->Data;
+  unmap(Addr, Size, UNMAP_ALL, &Data);
+}
+
+template <uptr MaxFreeListSize>
+void MapAllocator<MaxFreeListSize>::getStats(ScopedString *Str) const {
+  Str->append(
+      "Stats: MapAllocator: allocated %zu times (%zuK), freed %zu times "
+      "(%zuK), remains %zu (%zuK) max %zuM\n",
+      NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees, FreedBytes >> 10,
+      NumberOfAllocs - NumberOfFrees, (AllocatedBytes - FreedBytes) >> 10,
+      LargestSize >> 20);
+}
+
 } // namespace scudo
 
 #endif // SCUDO_SECONDARY_H_

diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index d74c07e8458b..e12d58392520 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -223,6 +223,7 @@ struct DeathConfig {
   // Tiny allocator, its Primary only serves chunks of 1024 bytes.
   using DeathSizeClassMap = scudo::SizeClassMap<1U, 10U, 10U, 10U, 1U, 10U>;
   typedef scudo::SizeClassAllocator32<DeathSizeClassMap, 18U> Primary;
+  typedef scudo::MapAllocator<0U> Secondary;
   template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U>;
 };
 

diff --git a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
index b602b8d63e32..be620a6937c0 100644
--- a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
@@ -16,18 +16,20 @@
 #include <mutex>
 #include <thread>
 
-TEST(ScudoSecondaryTest, SecondaryBasic) {
+template <class SecondaryT> static void testSecondaryBasic(void) {
   scudo::GlobalStats S;
   S.init();
-  scudo::MapAllocator *L = new scudo::MapAllocator;
+  SecondaryT *L = new SecondaryT;
   L->init(&S);
   const scudo::uptr Size = 1U << 16;
   void *P = L->allocate(Size);
   EXPECT_NE(P, nullptr);
   memset(P, 'A', Size);
-  EXPECT_GE(scudo::MapAllocator::getBlockSize(P), Size);
+  EXPECT_GE(SecondaryT::getBlockSize(P), Size);
   L->deallocate(P);
-  EXPECT_DEATH(memset(P, 'A', Size), "");
+  // If we are not using a free list, blocks are unmapped on deallocation.
+  if (SecondaryT::getMaxFreeListSize() == 0U)
+    EXPECT_DEATH(memset(P, 'A', Size), "");
 
   const scudo::uptr Align = 1U << 16;
   P = L->allocate(Size + Align, Align);
@@ -50,13 +52,21 @@ TEST(ScudoSecondaryTest, SecondaryBasic) {
   Str.output();
 }
 
+TEST(ScudoSecondaryTest, SecondaryBasic) {
+  testSecondaryBasic<scudo::MapAllocator<>>();
+  testSecondaryBasic<scudo::MapAllocator<0U>>();
+  testSecondaryBasic<scudo::MapAllocator<64U>>();
+}
+
+using LargeAllocator = scudo::MapAllocator<>;
+
 // This exercises a variety of combinations of size and alignment for the
 // MapAllocator. The size computation done here mimic the ones done by the
 // combined allocator.
 TEST(ScudoSecondaryTest, SecondaryCombinations) {
   constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
   constexpr scudo::uptr HeaderSize = scudo::roundUpTo(8, MinAlign);
-  scudo::MapAllocator *L = new scudo::MapAllocator;
+  LargeAllocator *L = new LargeAllocator;
   L->init(nullptr);
   for (scudo::uptr SizeLog = 0; SizeLog <= 20; SizeLog++) {
     for (scudo::uptr AlignLog = FIRST_32_SECOND_64(3, 4); AlignLog <= 16;
@@ -84,7 +94,7 @@ TEST(ScudoSecondaryTest, SecondaryCombinations) {
 }
 
 TEST(ScudoSecondaryTest, SecondaryIterate) {
-  scudo::MapAllocator *L = new scudo::MapAllocator;
+  LargeAllocator *L = new LargeAllocator;
   L->init(nullptr);
   std::vector<void *> V;
   const scudo::uptr PageSize = scudo::getPageSizeCached();
@@ -110,7 +120,7 @@ static std::mutex Mutex;
 static std::condition_variable Cv;
 static bool Ready = false;
 
-static void performAllocations(scudo::MapAllocator *L) {
+static void performAllocations(LargeAllocator *L) {
   std::vector<void *> V;
   const scudo::uptr PageSize = scudo::getPageSizeCached();
   {
@@ -127,7 +137,7 @@ static void performAllocations(scudo::MapAllocator *L) {
 }
 
 TEST(ScudoSecondaryTest, SecondaryThreadsRace) {
-  scudo::MapAllocator *L = new scudo::MapAllocator;
+  LargeAllocator *L = new LargeAllocator;
   L->init(nullptr);
   std::thread Threads[10];
   for (scudo::uptr I = 0; I < 10U; I++)

More information about the llvm-commits mailing list