[compiler-rt] fe6e77f - [scudo][standalone] 32-bit improvement

Kostya Kortchinsky via llvm-commits llvm-commits at lists.llvm.org
Fri Feb 7 11:17:00 PST 2020


Author: Kostya Kortchinsky
Date: 2020-02-07T11:16:48-08:00
New Revision: fe6e77f6fb9176ae09e1528ff46cb9c3588920a7

URL: https://github.com/llvm/llvm-project/commit/fe6e77f6fb9176ae09e1528ff46cb9c3588920a7
DIFF: https://github.com/llvm/llvm-project/commit/fe6e77f6fb9176ae09e1528ff46cb9c3588920a7.diff

LOG: [scudo][standalone] 32-bit improvement

Summary:
This tweaks some of the allocator's 32-bit-specific behaviors, notably
tailoring the size-class map.
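
As a rough illustration (not code from this change), and assuming the
SizeClassMap template parameters are <NumBits, MinSizeLog, MidSizeLog,
MaxSizeLog, MaxNumCachedHint, MaxBytesCachedLog> -- these names are my
interpretation, not quoted from size_class_map.h -- the 32-bit Android
change can be read as follows:

  #include <cstdio>

  int main() {
    // Old 32-bit Android map: SizeClassMap<2, 5, 9, 16, 14, 14>
    // New 32-bit Android map: SizeClassMap<3, 4, 9, 16, 14, 14>
    const unsigned OldMinSizeLog = 5, NewMinSizeLog = 4;
    const unsigned MaxSizeLog = 16; // unchanged by the patch
    std::printf("smallest class: %u -> %u bytes; largest class stays %u bytes\n",
                1u << OldMinSizeLog, 1u << NewMinSizeLog, 1u << MaxSizeLog);
    // Raising the first parameter (NumBits) from 2 to 3 also adds more
    // geometrically spaced classes between the mid and max sizes, i.e. a
    // finer-grained map for 32-bit Android.
    return 0;
  }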

I had to remove a `printStats` call from `__scudo_print_stats`: when used
within Bionic, the two allocators share the same slot, so they can't
coexist at the same time. I will have to find a solution for that later,
but right now we are not using the Svelte configuration.
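
The primary32.h hunks below also switch the region bytemap to an
offset-by-one encoding: an entry of 0 means "region unused", and an in-use
region stores ClassId + 1, which is why LargestClassId must fit in
UINT8_MAX - 1. A minimal standalone sketch of that encoding (not the actual
scudo code; the plain array and the LargestClassId value here are stand-ins
for the ByteMap and the real map):

  #include <cassert>
  #include <cstdint>

  constexpr uint32_t LargestClassId = 32;             // arbitrary value for the sketch
  static_assert(LargestClassId <= UINT8_MAX - 1, ""); // ClassId + 1 must fit in a byte

  uint8_t PossibleRegions[1024] = {};                 // 0 everywhere: no region in use

  void setRegionClass(uint32_t RegionIndex, uint32_t ClassId) {
    PossibleRegions[RegionIndex] = static_cast<uint8_t>(ClassId + 1U);
  }

  bool regionInUse(uint32_t RegionIndex) {
    return PossibleRegions[RegionIndex] != 0;
  }

  uint32_t regionClassId(uint32_t RegionIndex) {
    assert(regionInUse(RegionIndex));
    return PossibleRegions[RegionIndex] - 1U;          // undo the +1 offset
  }

This mirrors how iterateOverBlocks and releaseToOSMaybe in the diff now
compare PossibleRegions[I] - 1U against a ClassId instead of special-casing
ClassId 0.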

Reviewers: rengolin

Subscribers: #sanitizers, llvm-commits

Tags: #sanitizers, #llvm

Differential Revision: https://reviews.llvm.org/D74178

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/primary32.h
    compiler-rt/lib/scudo/standalone/size_class_map.h
    compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 50f6438ed38c..294043930e86 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -41,6 +41,8 @@ namespace scudo {
 template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
 public:
   typedef SizeClassMapT SizeClassMap;
+  // The bytemap can only track UINT8_MAX - 1 classes.
+  static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
   // Regions should be large enough to hold the largest Block.
   static_assert((1UL << RegionSizeLog) >= SizeClassMap::MaxSize, "");
   typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
@@ -87,8 +89,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
     while (NumberOfStashedRegions > 0)
       unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
             RegionSize);
-    // TODO(kostyak): unmap the TransferBatch regions as well.
-    for (uptr I = 0; I < NumRegions; I++)
+    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
       if (PossibleRegions[I])
         unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
     PossibleRegions.unmapTestOnly();
@@ -147,8 +148,9 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
 
   template <typename F> void iterateOverBlocks(F Callback) {
     for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
-      if (PossibleRegions[I]) {
-        const uptr BlockSize = getSizeByClassId(PossibleRegions[I]);
+      if (PossibleRegions[I] &&
+          (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
+        const uptr BlockSize = getSizeByClassId(PossibleRegions[I] - 1U);
         const uptr From = I * RegionSize;
         const uptr To = From + (RegionSize / BlockSize) * BlockSize;
         for (uptr Block = From; Block < To; Block += BlockSize)
@@ -258,14 +260,12 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
     if (!Region)
       Region = allocateRegionSlow();
     if (LIKELY(Region)) {
-      if (ClassId) {
-        const uptr RegionIndex = computeRegionId(Region);
-        if (RegionIndex < MinRegionIndex)
-          MinRegionIndex = RegionIndex;
-        if (RegionIndex > MaxRegionIndex)
-          MaxRegionIndex = RegionIndex;
-        PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId));
-      }
+      const uptr RegionIndex = computeRegionId(Region);
+      if (RegionIndex < MinRegionIndex)
+        MinRegionIndex = RegionIndex;
+      if (RegionIndex > MaxRegionIndex)
+        MaxRegionIndex = RegionIndex;
+      PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
     }
     return Region;
   }
@@ -350,10 +350,10 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
     const uptr InUse = Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks;
     const uptr AvailableChunks = Sci->AllocatedUser / getSizeByClassId(ClassId);
     Str->append("  %02zu (%6zu): mapped: %6zuK popped: %7zu pushed: %7zu "
-                "inuse: %6zu avail: %6zu rss: %6zuK\n",
+                "inuse: %6zu avail: %6zu rss: %6zuK releases: %6zu\n",
                 ClassId, getSizeByClassId(ClassId), Sci->AllocatedUser >> 10,
                 Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks, InUse,
-                AvailableChunks, Rss >> 10);
+                AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
   }
 
   NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
@@ -389,10 +389,11 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
     // regions. But it will have to do for now.
     uptr TotalReleasedBytes = 0;
     for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
-      if (PossibleRegions[I] == ClassId) {
-        ReleaseRecorder Recorder(I * RegionSize);
-        releaseFreeMemoryToOS(Sci->FreeList, I * RegionSize,
-                              RegionSize / PageSize, BlockSize, &Recorder);
+      if (PossibleRegions[I] - 1U == ClassId) {
+        const uptr Region = I * RegionSize;
+        ReleaseRecorder Recorder(Region);
+        releaseFreeMemoryToOS(Sci->FreeList, Region, RegionSize / PageSize,
+                              BlockSize, &Recorder);
         if (Recorder.getReleasedRangesCount() > 0) {
           Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
           Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
@@ -407,6 +408,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
 
   SizeClassInfo SizeClassInfoArray[NumClasses];
 
+  // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
   ByteMap PossibleRegions;
   // Keep track of the lowest & highest regions allocated to avoid looping
   // through the whole NumRegions.

diff --git a/compiler-rt/lib/scudo/standalone/size_class_map.h b/compiler-rt/lib/scudo/standalone/size_class_map.h
index 3849feaf38dd..ff587c97955d 100644
--- a/compiler-rt/lib/scudo/standalone/size_class_map.h
+++ b/compiler-rt/lib/scudo/standalone/size_class_map.h
@@ -144,7 +144,7 @@ typedef SizeClassMap<4, 4, 8, 14, 4, 10> SvelteSizeClassMap;
 typedef SizeClassMap<2, 5, 9, 16, 14, 14> AndroidSizeClassMap;
 #else
 typedef SizeClassMap<4, 3, 7, 14, 5, 10> SvelteSizeClassMap;
-typedef SizeClassMap<2, 5, 9, 16, 14, 14> AndroidSizeClassMap;
+typedef SizeClassMap<3, 4, 9, 16, 14, 14> AndroidSizeClassMap;
 #endif
 
 } // namespace scudo

diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
index f004369d96cb..cab565b0664d 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -55,11 +55,7 @@ scudo::Allocator<scudo::AndroidSvelteConfig, SCUDO_PREFIX(malloc_postinit)> *
 #undef SCUDO_ALLOCATOR
 #undef SCUDO_PREFIX
 
-// The following is the only function that will end up initializing both
-// allocators, which will result in a slight increase in memory footprint.
-INTERFACE void __scudo_print_stats(void) {
-  Allocator.printStats();
-  SvelteAllocator.printStats();
-}
+// TODO(kostyak): support both allocators.
+INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
 
 #endif // SCUDO_ANDROID && _BIONIC

