[compiler-rt] 0923963 - [scudo] Manage pages with MemMap in Secondary Allocator

Chia-hung Duan via llvm-commits llvm-commits at lists.llvm.org
Wed Apr 5 14:35:06 PDT 2023


Author: Chia-hung Duan
Date: 2023-04-05T21:28:55Z
New Revision: 09239636dce5e0089da40dc45ba9c98c53a3af99

URL: https://github.com/llvm/llvm-project/commit/09239636dce5e0089da40dc45ba9c98c53a3af99
DIFF: https://github.com/llvm/llvm-project/commit/09239636dce5e0089da40dc45ba9c98c53a3af99.diff

LOG: [scudo] Manage pages with MemMap in Secondary Allocator

Replace the uses of raw map()/unmap(), etc. calls with MemMap. Also
remove the direct use of MapPlatformData in the secondary allocator.

Also add setMemoryPermission() in MemMap.

Reviewed By: cryptoad

Differential Revision: https://reviews.llvm.org/D146454
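
For context, a minimal sketch of the call pattern this change moves the
secondary allocator towards. This is a hypothetical call site with made-up
names, not part of the commit; the MemMapT / ReservedMemoryT interface it
relies on is the one shown in mem_map.h and mem_map_base.h below.

    // Hypothetical example only.
    #include "common.h"
    #include "mem_map.h"

    namespace scudo {

    void exampleReserveCommitRelease(uptr MapSize) {
      // Reserve an address range, then hand its full ownership to a MemMapT.
      ReservedMemoryT ReservedMemory;
      ReservedMemory.create(/*Addr=*/0U, MapSize, "scudo:example",
                            MAP_ALLOWNOMEM);
      MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                               ReservedMemory.getCapacity());

      // Commit one page read/write. The platform data now travels inside the
      // MemMap, so no MapPlatformData has to be threaded through call sites.
      MemMap.remap(MemMap.getBase(), getPageSizeCached(), "scudo:example",
                   MAP_RESIZABLE);

      // The new setMemoryPermission() replaces the raw free function.
      MemMap.setMemoryPermission(MemMap.getBase(), getPageSizeCached(),
                                 MAP_NOACCESS);

      // Unmapping the whole region releases everything in one call.
      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
    }

    } // namespace scudo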

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/mem_map.cpp
    compiler-rt/lib/scudo/standalone/mem_map.h
    compiler-rt/lib/scudo/standalone/mem_map_base.h
    compiler-rt/lib/scudo/standalone/secondary.h

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/mem_map.cpp b/compiler-rt/lib/scudo/standalone/mem_map.cpp
index 36cfc1f834226..662d684e5099a 100644
--- a/compiler-rt/lib/scudo/standalone/mem_map.cpp
+++ b/compiler-rt/lib/scudo/standalone/mem_map.cpp
@@ -46,6 +46,10 @@ void MemMapDefault::releaseAndZeroPagesToOSImpl(uptr From, uptr Size) {
   return ::scudo::releasePagesToOS(Base, From - Base, Size, &Data);
 }
 
+void MemMapDefault::setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
+  return ::scudo::setMemoryPermission(Addr, Size, Flags);
+}
+
 void ReservedMemoryDefault::releaseImpl() {
   ::scudo::unmap(reinterpret_cast<void *>(Base), Capacity, UNMAP_ALL, &Data);
 }

diff --git a/compiler-rt/lib/scudo/standalone/mem_map.h b/compiler-rt/lib/scudo/standalone/mem_map.h
index 58fbe9d01b7ed..6179e8aa0aaaa 100644
--- a/compiler-rt/lib/scudo/standalone/mem_map.h
+++ b/compiler-rt/lib/scudo/standalone/mem_map.h
@@ -34,6 +34,7 @@ class MemMapDefault final : public MemMapBase<MemMapDefault> {
   bool mapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
   void unmapImpl(uptr Addr, uptr Size);
   bool remapImpl(uptr Addr, uptr Size, const char *Name, uptr Flags);
+  void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags);
   void releasePagesToOSImpl(uptr From, uptr Size) {
     return releaseAndZeroPagesToOSImpl(From, Size);
   }

diff --git a/compiler-rt/lib/scudo/standalone/mem_map_base.h b/compiler-rt/lib/scudo/standalone/mem_map_base.h
index f2b6a27736130..0560f4102d860 100644
--- a/compiler-rt/lib/scudo/standalone/mem_map_base.h
+++ b/compiler-rt/lib/scudo/standalone/mem_map_base.h
@@ -45,6 +45,15 @@ template <class Derived> class MemMapBase {
     return invokeImpl(&Derived::remapImpl, Addr, Size, Name, Flags);
   }
 
+  // This is used to update the pages' access permissions, for example, to
+  // mark pages as having no read/write permission.
+  void setMemoryPermission(uptr Addr, uptr Size, uptr Flags) {
+    DCHECK(isAllocated());
+    DCHECK((Addr >= getBase()) && (Addr + Size <= getBase() + getCapacity()));
+    return static_cast<Derived *>(this)->setMemoryPermissionImpl(Addr, Size,
+                                                                 Flags);
+  }
+
   // Suggest releasing a set of contiguous physical pages back to the OS. Note
   // that only physical pages are supposed to be released. Any release of
   // virtual pages may lead to undefined behavior.
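
(For readers unfamiliar with the layout above: MemMapBase is a CRTP base, so
setMemoryPermission() dispatches statically to the platform class. A stripped
down sketch of that mechanism follows, using hypothetical sketch classes
rather than the real MemMapDefault.)

    // Illustration only; the real classes live in mem_map_base.h / mem_map.h.
    #include <cstdint>

    using uptr = std::uintptr_t;

    template <class Derived> class MemMapBaseSketch {
    public:
      void setMemoryPermission(uptr Addr, uptr Size, uptr Flags) {
        // Static dispatch to the platform-specific implementation; no
        // virtual call and no per-object overhead.
        static_cast<Derived *>(this)->setMemoryPermissionImpl(Addr, Size,
                                                              Flags);
      }
    };

    class MemMapSketch final : public MemMapBaseSketch<MemMapSketch> {
    public:
      void setMemoryPermissionImpl(uptr Addr, uptr Size, uptr Flags) {
        // A real implementation forwards to a platform primitive, e.g. the
        // default one in mem_map.cpp calls ::scudo::setMemoryPermission().
        (void)Addr; (void)Size; (void)Flags;
      }
    };

    // Usage: MemMapSketch M; M.setMemoryPermission(Base, Size, /*Flags=*/0);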

diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index b30f51b9c494d..e6a82db8cb392 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -12,6 +12,7 @@
 #include "chunk.h"
 #include "common.h"
 #include "list.h"
+#include "mem_map.h"
 #include "memtag.h"
 #include "mutex.h"
 #include "options.h"
@@ -37,9 +38,7 @@ struct alignas(Max<uptr>(archSupportsMemoryTagging()
   LargeBlock::Header *Next;
   uptr CommitBase;
   uptr CommitSize;
-  uptr MapBase;
-  uptr MapSize;
-  [[no_unique_address]] MapPlatformData Data;
+  MemMapT MemMap;
 };
 
 static_assert(sizeof(Header) % (1U << SCUDO_MIN_ALIGNMENT_LOG) == 0, "");
@@ -66,8 +65,11 @@ template <typename Config> static Header *getHeader(const void *Ptr) {
 } // namespace LargeBlock
 
 static void unmap(LargeBlock::Header *H) {
-  MapPlatformData Data = H->Data;
-  unmap(reinterpret_cast<void *>(H->MapBase), H->MapSize, UNMAP_ALL, &Data);
+  // Note that `H->MemMap` is stored on the pages it manages. Take over the
+  // ownership before calling unmap() so that nothing in the unmap() sequence
+  // touches the pages once they become inaccessible.
+  MemMapT MemMap = H->MemMap;
+  MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
 }
 
 class MapAllocatorNoCache {
@@ -97,20 +99,19 @@ static const uptr MaxUnusedCachePages = 4U;
 
 template <typename Config>
 void mapSecondary(Options Options, uptr CommitBase, uptr CommitSize,
-                  uptr AllocPos, uptr Flags, MapPlatformData *Data) {
+                  uptr AllocPos, uptr Flags, MemMapT &MemMap) {
   const uptr MaxUnusedCacheBytes = MaxUnusedCachePages * getPageSizeCached();
   if (useMemoryTagging<Config>(Options) && CommitSize > MaxUnusedCacheBytes) {
     const uptr UntaggedPos = Max(AllocPos, CommitBase + MaxUnusedCacheBytes);
-    map(reinterpret_cast<void *>(CommitBase), UntaggedPos - CommitBase,
-        "scudo:secondary", MAP_RESIZABLE | MAP_MEMTAG | Flags, Data);
-    map(reinterpret_cast<void *>(UntaggedPos),
-        CommitBase + CommitSize - UntaggedPos, "scudo:secondary",
-        MAP_RESIZABLE | Flags, Data);
+    MemMap.remap(CommitBase, UntaggedPos - CommitBase, "scudo:secondary",
+                 MAP_RESIZABLE | MAP_MEMTAG | Flags);
+    MemMap.remap(UntaggedPos, CommitBase + CommitSize - UntaggedPos,
+                 "scudo:secondary", MAP_RESIZABLE | Flags);
   } else {
-    map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
+    const uptr RemapFlags =
         MAP_RESIZABLE | (useMemoryTagging<Config>(Options) ? MAP_MEMTAG : 0) |
-            Flags,
-        Data);
+        Flags;
+    MemMap.remap(CommitBase, CommitSize, "scudo:secondary", RemapFlags);
   }
 }
 
@@ -155,10 +156,8 @@ template <typename Config> class MapAllocatorCache {
     CachedBlock Entry;
     Entry.CommitBase = H->CommitBase;
     Entry.CommitSize = H->CommitSize;
-    Entry.MapBase = H->MapBase;
-    Entry.MapSize = H->MapSize;
     Entry.BlockBegin = reinterpret_cast<uptr>(H + 1);
-    Entry.Data = H->Data;
+    Entry.MemMap = H->MemMap;
     Entry.Time = Time;
     if (useMemoryTagging<Config>(Options)) {
       if (Interval == 0 && !SCUDO_FUCHSIA) {
@@ -168,13 +167,13 @@ template <typename Config> class MapAllocatorCache {
         // on top so we just do the two syscalls there.
         Entry.Time = 0;
         mapSecondary<Config>(Options, Entry.CommitBase, Entry.CommitSize,
-                             Entry.CommitBase, MAP_NOACCESS, &Entry.Data);
+                             Entry.CommitBase, MAP_NOACCESS, Entry.MemMap);
       } else {
-        setMemoryPermission(Entry.CommitBase, Entry.CommitSize, MAP_NOACCESS,
-                            &Entry.Data);
+        Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize,
+                                         MAP_NOACCESS);
       }
     } else if (Interval == 0) {
-      releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
+      Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
       Entry.Time = 0;
     }
     do {
@@ -223,8 +222,7 @@ template <typename Config> class MapAllocatorCache {
     else if (Interval >= 0)
       releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
     if (!EntryCached)
-      unmap(reinterpret_cast<void *>(Entry.MapBase), Entry.MapSize, UNMAP_ALL,
-            &Entry.Data);
+      Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
   }
 
   bool retrieve(Options Options, uptr Size, uptr Alignment,
@@ -267,7 +265,7 @@ template <typename Config> class MapAllocatorCache {
         LargeBlock::addHeaderTag<Config>(HeaderPos));
     *Zeroed = Entry.Time == 0;
     if (useMemoryTagging<Config>(Options))
-      setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0, &Entry.Data);
+      Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
     uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
     if (useMemoryTagging<Config>(Options)) {
       if (*Zeroed) {
@@ -282,9 +280,7 @@ template <typename Config> class MapAllocatorCache {
     }
     (*H)->CommitBase = Entry.CommitBase;
     (*H)->CommitSize = Entry.CommitSize;
-    (*H)->MapBase = Entry.MapBase;
-    (*H)->MapSize = Entry.MapSize;
-    (*H)->Data = Entry.Data;
+    (*H)->MemMap = Entry.MemMap;
     return true;
   }
 
@@ -323,16 +319,18 @@ template <typename Config> class MapAllocatorCache {
     ScopedLock L(Mutex);
     for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
       if (Quarantine[I].CommitBase) {
-        unmap(reinterpret_cast<void *>(Quarantine[I].MapBase),
-              Quarantine[I].MapSize, UNMAP_ALL, &Quarantine[I].Data);
+        MemMapT &MemMap = Quarantine[I].MemMap;
+        MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
         Quarantine[I].CommitBase = 0;
       }
     }
     const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
-    for (u32 I = 0; I < MaxCount; I++)
-      if (Entries[I].CommitBase)
-        setMemoryPermission(Entries[I].CommitBase, Entries[I].CommitSize, 0,
-                            &Entries[I].Data);
+    for (u32 I = 0; I < MaxCount; I++) {
+      if (Entries[I].CommitBase) {
+        Entries[I].MemMap.setMemoryPermission(Entries[I].CommitBase,
+                                              Entries[I].CommitSize, 0);
+      }
+    }
     QuarantinePos = -1U;
   }
 
@@ -344,38 +342,31 @@ template <typename Config> class MapAllocatorCache {
 
 private:
   void empty() {
-    struct {
-      void *MapBase;
-      uptr MapSize;
-      MapPlatformData Data;
-    } MapInfo[Config::SecondaryCacheEntriesArraySize];
+    MemMapT MapInfo[Config::SecondaryCacheEntriesArraySize];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
       for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
         if (!Entries[I].CommitBase)
           continue;
-        MapInfo[N].MapBase = reinterpret_cast<void *>(Entries[I].MapBase);
-        MapInfo[N].MapSize = Entries[I].MapSize;
-        MapInfo[N].Data = Entries[I].Data;
+        MapInfo[N] = Entries[I].MemMap;
         Entries[I].CommitBase = 0;
         N++;
       }
       EntriesCount = 0;
       IsFullEvents = 0;
     }
-    for (uptr I = 0; I < N; I++)
-      unmap(MapInfo[I].MapBase, MapInfo[I].MapSize, UNMAP_ALL,
-            &MapInfo[I].Data);
+    for (uptr I = 0; I < N; I++) {
+      MemMapT &MemMap = MapInfo[I];
+      MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+    }
   }
 
   struct CachedBlock {
     uptr CommitBase;
     uptr CommitSize;
-    uptr MapBase;
-    uptr MapSize;
     uptr BlockBegin;
-    [[no_unique_address]] MapPlatformData Data;
+    MemMapT MemMap;
     u64 Time;
   };
 
@@ -387,7 +378,7 @@ template <typename Config> class MapAllocatorCache {
         OldestTime = Entry.Time;
       return;
     }
-    releasePagesToOS(Entry.CommitBase, 0, Entry.CommitSize, &Entry.Data);
+    Entry.MemMap.releasePagesToOS(Entry.CommitBase, Entry.CommitSize);
     Entry.Time = 0;
   }
 
@@ -538,16 +529,20 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
         AllocatedBytes += BlockSize;
         NumberOfAllocs++;
         Stats.add(StatAllocated, BlockSize);
-        Stats.add(StatMapped, H->MapSize);
+        Stats.add(StatMapped, H->MemMap.getCapacity());
       }
       return Ptr;
     }
   }
 
-  MapPlatformData Data = {};
+  ReservedMemoryT ReservedMemory;
   const uptr MapSize = RoundedSize + 2 * PageSize;
-  uptr MapBase = reinterpret_cast<uptr>(
-      map(nullptr, MapSize, nullptr, MAP_NOACCESS | MAP_ALLOWNOMEM, &Data));
+  ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr, MAP_ALLOWNOMEM);
+
+  // Take the entire ownership of the reserved region.
+  MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
+                                           ReservedMemory.getCapacity());
+  uptr MapBase = MemMap.getBase();
   if (UNLIKELY(!MapBase))
     return nullptr;
   uptr CommitBase = MapBase + PageSize;
@@ -565,21 +560,21 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
     // We only trim the extra memory on 32-bit platforms: 64-bit platforms
     // are less constrained memory wise, and that saves us two syscalls.
     if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
-      unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0, &Data);
+      MemMap.unmap(MapBase, NewMapBase - MapBase);
       MapBase = NewMapBase;
     }
     const uptr NewMapEnd =
         CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
     DCHECK_LE(NewMapEnd, MapEnd);
     if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
-      unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
+      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
       MapEnd = NewMapEnd;
     }
   }
 
   const uptr CommitSize = MapEnd - PageSize - CommitBase;
   const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
-  mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, &Data);
+  mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0, MemMap);
   const uptr HeaderPos =
       AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
   LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
@@ -587,11 +582,9 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
   if (useMemoryTagging<Config>(Options))
     storeTags(LargeBlock::addHeaderTag<Config>(CommitBase),
               reinterpret_cast<uptr>(H + 1));
-  H->MapBase = MapBase;
-  H->MapSize = MapEnd - MapBase;
   H->CommitBase = CommitBase;
   H->CommitSize = CommitSize;
-  H->Data = Data;
+  H->MemMap = MemMap;
   if (BlockEndPtr)
     *BlockEndPtr = CommitBase + CommitSize;
   {
@@ -602,7 +595,7 @@ void *MapAllocator<Config>::allocate(Options Options, uptr Size, uptr Alignment,
       LargestSize = CommitSize;
     NumberOfAllocs++;
     Stats.add(StatAllocated, CommitSize);
-    Stats.add(StatMapped, H->MapSize);
+    Stats.add(StatMapped, H->MemMap.getCapacity());
   }
   return reinterpret_cast<void *>(HeaderPos + LargeBlock::getHeaderSize());
 }
@@ -618,7 +611,7 @@ void MapAllocator<Config>::deallocate(Options Options, void *Ptr)
     FreedBytes += CommitSize;
     NumberOfFrees++;
     Stats.sub(StatAllocated, CommitSize);
-    Stats.sub(StatMapped, H->MapSize);
+    Stats.sub(StatMapped, H->MemMap.getCapacity());
   }
   Cache.store(Options, H);
 }

