[compiler-rt] 3d35246 - [scudo] Make guard pages optional in the secondary (#125960)

via llvm-commits llvm-commits at lists.llvm.org
Thu Feb 6 17:03:34 PST 2025


Author: Christopher Ferris
Date: 2025-02-06T17:03:30-08:00
New Revision: 3d35246c50ee67a71552ead70a6b0b15d7358ec5

URL: https://github.com/llvm/llvm-project/commit/3d35246c50ee67a71552ead70a6b0b15d7358ec5
DIFF: https://github.com/llvm/llvm-project/commit/3d35246c50ee67a71552ead70a6b0b15d7358ec5.diff

LOG: [scudo] Make guard pages optional in the secondary (#125960)

Add an optional flag for the secondary allocator, `EnableGuardPages`, to
enable or disable the use of guard pages around secondary allocations. By
default, this option is enabled.
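
For illustration, a config opts out by defining `EnableGuardPages = false` in
its `Secondary` struct. The sketch below mirrors the
`TestNoCacheNoGuardPageConfig` added in the tests; the name
`NoGuardPageConfig` and the minimal scaffolding around the flag are
illustrative only:

    // Hypothetical config that disables guard pages in the secondary.
    // Modeled on TestNoCacheNoGuardPageConfig from secondary_test.cpp below.
    struct NoGuardPageConfig {
      static const bool MaySupportMemoryTagging = false;
      template <typename> using TSDRegistryT = void;
      template <typename> using PrimaryT = void;
      template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;

      struct Secondary {
        template <typename Config>
        using CacheT = scudo::MapAllocatorNoCache<Config>;
        // Opt out of the guard pages that are otherwise added by default.
        static const bool EnableGuardPages = false;
      };
    };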

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/allocator_config.def
    compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
    compiler-rt/lib/scudo/standalone/secondary.h
    compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
    compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
index ce37b1cfaedccce..43893e9af3cf395 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.def
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -31,6 +31,9 @@
 #ifndef SECONDARY_REQUIRED_TEMPLATE_TYPE
 #define SECONDARY_REQUIRED_TEMPLATE_TYPE(...)
 #endif
+#ifndef SECONDARY_OPTIONAL
+#define SECONDARY_OPTIONAL(...)
+#endif
 #ifndef SECONDARY_CACHE_OPTIONAL
 #define SECONDARY_CACHE_OPTIONAL(...)
 #endif
@@ -109,6 +112,11 @@ PRIMARY_OPTIONAL_TYPE(CompactPtrT, uptr)
 // Defines the type of Secondary Cache to use.
 SECONDARY_REQUIRED_TEMPLATE_TYPE(CacheT)
 
+// SECONDARY_OPTIONAL(TYPE, NAME, DEFAULT)
+//
+// Add one guard page at the front and back for each allocation.
+SECONDARY_OPTIONAL(const bool, EnableGuardPages, true)
+
 // SECONDARY_CACHE_OPTIONAL(TYPE, NAME, DEFAULT)
 //
 // Defines the type of cache used by the Secondary. Some additional
@@ -122,6 +130,7 @@ SECONDARY_CACHE_OPTIONAL(const s32, MaxReleaseToOsIntervalMs, INT32_MAX)
 SECONDARY_CACHE_OPTIONAL(const s32, DefaultReleaseToOsIntervalMs, INT32_MIN)
 
 #undef SECONDARY_CACHE_OPTIONAL
+#undef SECONDARY_OPTIONAL
 #undef SECONDARY_REQUIRED_TEMPLATE_TYPE
 #undef PRIMARY_OPTIONAL_TYPE
 #undef PRIMARY_OPTIONAL

diff --git a/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h b/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
index ea12a5b1447f69f..ac639ee9dd943f9 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
@@ -95,6 +95,13 @@ template <typename AllocatorConfig> struct SecondaryConfig {
 #define SECONDARY_REQUIRED_TEMPLATE_TYPE(NAME)                                 \
   template <typename T>                                                        \
   using NAME = typename AllocatorConfig::Secondary::template NAME<T>;
+
+#define SECONDARY_OPTIONAL(TYPE, NAME, DEFAULT)                                \
+  OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT, NAME)                                 \
+  static constexpr removeConst<TYPE>::type get##NAME() {                       \
+    return NAME##State<typename AllocatorConfig::Secondary>::getValue();       \
+  }
+
 #include "allocator_config.def"
 
   struct CacheConfig {

diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 25b82356ca20f93..f3f91c40e7a0370 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -614,6 +614,12 @@ template <typename Config> class MapAllocator {
     return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
   }
 
+  static uptr getGuardPageSize() {
+    if (Config::getEnableGuardPages())
+      return getPageSizeCached();
+    return 0U;
+  }
+
   static constexpr uptr getHeadersSize() {
     return Chunk::getHeaderSize() + LargeBlock::getHeaderSize();
   }
@@ -763,11 +769,11 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
 
   uptr RoundedSize =
       roundUp(roundUp(Size, Alignment) + getHeadersSize(), PageSize);
-  if (Alignment > PageSize)
+  if (UNLIKELY(Alignment > PageSize))
     RoundedSize += Alignment - PageSize;
 
   ReservedMemoryT ReservedMemory;
-  const uptr MapSize = RoundedSize + 2 * PageSize;
+  const uptr MapSize = RoundedSize + 2 * getGuardPageSize();
   if (UNLIKELY(!ReservedMemory.create(/*Addr=*/0U, MapSize, nullptr,
                                       MAP_ALLOWNOMEM))) {
     return nullptr;
@@ -777,7 +783,7 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
   MemMapT MemMap = ReservedMemory.dispatch(ReservedMemory.getBase(),
                                            ReservedMemory.getCapacity());
   uptr MapBase = MemMap.getBase();
-  uptr CommitBase = MapBase + PageSize;
+  uptr CommitBase = MapBase + getGuardPageSize();
   uptr MapEnd = MapBase + MapSize;
 
   // In the unlikely event of alignments larger than a page, adjust the amount
@@ -786,25 +792,30 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
     // For alignments greater than or equal to a page, the user pointer (eg:
     // the pointer that is returned by the C or C++ allocation APIs) ends up
     // on a page boundary , and our headers will live in the preceding page.
-    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
-    const uptr NewMapBase = CommitBase - PageSize;
-    DCHECK_GE(NewMapBase, MapBase);
+    CommitBase =
+        roundUp(MapBase + getGuardPageSize() + 1, Alignment) - PageSize;
     // We only trim the extra memory on 32-bit platforms: 64-bit platforms
     // are less constrained memory wise, and that saves us two syscalls.
-    if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
-      MemMap.unmap(MapBase, NewMapBase - MapBase);
-      MapBase = NewMapBase;
-    }
-    const uptr NewMapEnd =
-        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
-    DCHECK_LE(NewMapEnd, MapEnd);
-    if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
-      MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
-      MapEnd = NewMapEnd;
+    if (SCUDO_WORDSIZE == 32U) {
+      const uptr NewMapBase = CommitBase - getGuardPageSize();
+      DCHECK_GE(NewMapBase, MapBase);
+      if (NewMapBase != MapBase) {
+        MemMap.unmap(MapBase, NewMapBase - MapBase);
+        MapBase = NewMapBase;
+      }
+      // CommitBase is past the first guard page, but this computation needs
+      // to include a page where the header lives.
+      const uptr NewMapEnd =
+          CommitBase + PageSize + roundUp(Size, PageSize) + getGuardPageSize();
+      DCHECK_LE(NewMapEnd, MapEnd);
+      if (NewMapEnd != MapEnd) {
+        MemMap.unmap(NewMapEnd, MapEnd - NewMapEnd);
+        MapEnd = NewMapEnd;
+      }
     }
   }
 
-  const uptr CommitSize = MapEnd - PageSize - CommitBase;
+  const uptr CommitSize = MapEnd - getGuardPageSize() - CommitBase;
   const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
   if (!mapSecondary<Config>(Options, CommitBase, CommitSize, AllocPos, 0,
                             MemMap)) {
@@ -812,6 +823,8 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
     return nullptr;
   }
   const uptr HeaderPos = AllocPos - getHeadersSize();
+  // Make sure that the header is not in the guard page or before the base.
+  DCHECK_GE(HeaderPos, MapBase + getGuardPageSize());
   LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(
       LargeBlock::addHeaderTag<Config>(HeaderPos));
   if (useMemoryTagging<Config>(Options))

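To make the sizing changes above concrete, here is a minimal standalone
sketch of the map/commit arithmetic in `MapAllocator<Config>::allocate` for
the common `Alignment <= PageSize` path, assuming a hypothetical 4 KiB page
size (the `layout` helper and its placeholder base address are illustrative,
not part of the commit):

    #include <cstdint>
    #include <cstdio>

    using uptr = uintptr_t;

    // Mirrors the MapSize/CommitBase/CommitSize computation, with and
    // without guard pages, for a page-aligned RoundedSize.
    static void layout(uptr RoundedSize, bool EnableGuardPages) {
      const uptr PageSize = 4096;
      // Stand-in for getGuardPageSize().
      const uptr GuardPageSize = EnableGuardPages ? PageSize : 0;
      const uptr MapBase = 0x100000;  // placeholder base, illustration only
      const uptr MapSize = RoundedSize + 2 * GuardPageSize;
      const uptr CommitBase = MapBase + GuardPageSize;
      const uptr MapEnd = MapBase + MapSize;
      const uptr CommitSize = MapEnd - GuardPageSize - CommitBase;
      std::printf("guards=%d MapSize=%zu CommitSize=%zu\n", EnableGuardPages,
                  (size_t)MapSize, (size_t)CommitSize);
    }

    int main() {
      layout(1 << 20, true);   // maps 1 MiB + 2 guard pages, commits 1 MiB
      layout(1 << 20, false);  // maps and commits exactly 1 MiB
    }

In both modes the committed region is `RoundedSize`; disabling guard pages
only drops the two uncommitted pages that bracket it, since
`getGuardPageSize()` collapses every guard-page term to zero.
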
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index ff98eb3397ee0e3..9d665ef88c402e8 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -54,16 +54,18 @@ void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                              scudo::uptr Alignment) {
   const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
   Size = scudo::roundUp(Size, MinAlignment);
-  if (Allocator->useMemoryTaggingTestOnly())
+  if (Allocator->useMemoryTaggingTestOnly()) {
     EXPECT_DEATH(
         {
           disableDebuggerdMaybe();
           reinterpret_cast<char *>(P)[-1] = 'A';
         },
         "");
+  }
   if (isPrimaryAllocation<AllocatorT>(Size, Alignment)
           ? Allocator->useMemoryTaggingTestOnly()
-          : Alignment == MinAlignment) {
+          : Alignment == MinAlignment &&
+                AllocatorT::SecondaryT::getGuardPageSize() > 0) {
     EXPECT_DEATH(
         {
           disableDebuggerdMaybe();

diff --git a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
index 518c1f2f0a6e65a..d8a7f6bd66ed212 100644
--- a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
@@ -80,6 +80,19 @@ struct TestNoCacheConfig {
   };
 };
 
+struct TestNoCacheNoGuardPageConfig {
+  static const bool MaySupportMemoryTagging = false;
+  template <typename> using TSDRegistryT = void;
+  template <typename> using PrimaryT = void;
+  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+
+  struct Secondary {
+    template <typename Config>
+    using CacheT = scudo::MapAllocatorNoCache<Config>;
+    static const bool EnableGuardPages = false;
+  };
+};
+
 struct TestCacheConfig {
   static const bool MaySupportMemoryTagging = false;
   template <typename> using TSDRegistryT = void;
@@ -100,6 +113,27 @@ struct TestCacheConfig {
   };
 };
 
+struct TestCacheNoGuardPageConfig {
+  static const bool MaySupportMemoryTagging = false;
+  template <typename> using TSDRegistryT = void;
+  template <typename> using PrimaryT = void;
+  template <typename> using SecondaryT = void;
+
+  struct Secondary {
+    struct Cache {
+      static const scudo::u32 EntriesArraySize = 128U;
+      static const scudo::u32 QuarantineSize = 0U;
+      static const scudo::u32 DefaultMaxEntriesCount = 64U;
+      static const scudo::uptr DefaultMaxEntrySize = 1UL << 20;
+      static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+      static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    };
+
+    template <typename Config> using CacheT = scudo::MapAllocatorCache<Config>;
+    static const bool EnableGuardPages = false;
+  };
+};
+
 template <typename Config> static void testBasic() {
   using SecondaryT = scudo::MapAllocator<scudo::SecondaryConfig<Config>>;
   AllocatorInfoType<Config> Info;
@@ -146,15 +180,17 @@ template <typename Config> static void testBasic() {
 
 TEST(ScudoSecondaryTest, Basic) {
   testBasic<TestNoCacheConfig>();
+  testBasic<TestNoCacheNoGuardPageConfig>();
   testBasic<TestCacheConfig>();
+  testBasic<TestCacheNoGuardPageConfig>();
   testBasic<scudo::DefaultConfig>();
 }
 
 // This exercises a variety of combinations of size and alignment for the
 // MapAllocator. The size computation done here mimic the ones done by the
 // combined allocator.
-TEST(ScudoSecondaryTest, AllocatorCombinations) {
-  AllocatorInfoType<TestNoCacheConfig> Info;
+template <typename Config> void testAllocatorCombinations() {
+  AllocatorInfoType<Config> Info;
 
   constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
   constexpr scudo::uptr HeaderSize = scudo::roundUp(8, MinAlign);
@@ -180,8 +216,13 @@ TEST(ScudoSecondaryTest, AllocatorCombinations) {
   }
 }
 
-TEST(ScudoSecondaryTest, AllocatorIterate) {
-  AllocatorInfoType<TestNoCacheConfig> Info;
+TEST(ScudoSecondaryTest, AllocatorCombinations) {
+  testAllocatorCombinations<TestNoCacheConfig>();
+  testAllocatorCombinations<TestNoCacheNoGuardPageConfig>();
+}
+
+template <typename Config> void testAllocatorIterate() {
+  AllocatorInfoType<Config> Info;
 
   std::vector<void *> V;
   for (scudo::uptr I = 0; I < 32U; I++)
@@ -201,8 +242,13 @@ TEST(ScudoSecondaryTest, AllocatorIterate) {
   }
 }
 
-TEST(ScudoSecondaryTest, AllocatorWithReleaseThreadsRace) {
-  AllocatorInfoType<TestNoCacheConfig> Info(/*ReleaseToOsInterval=*/0);
+TEST(ScudoSecondaryTest, AllocatorIterate) {
+  testAllocatorIterate<TestNoCacheConfig>();
+  testAllocatorIterate<TestNoCacheNoGuardPageConfig>();
+}
+
+template <typename Config> void testAllocatorWithReleaseThreadsRace() {
+  AllocatorInfoType<Config> Info(/*ReleaseToOsInterval=*/0);
 
   std::mutex Mutex;
   std::condition_variable Cv;
@@ -243,6 +289,64 @@ TEST(ScudoSecondaryTest, AllocatorWithReleaseThreadsRace) {
     T.join();
 }
 
+TEST(ScudoSecondaryTest, AllocatorWithReleaseThreadsRace) {
+  testAllocatorWithReleaseThreadsRace<TestNoCacheConfig>();
+  testAllocatorWithReleaseThreadsRace<TestNoCacheNoGuardPageConfig>();
+}
+
+template <typename Config>
+void testGetMappedSize(scudo::uptr Size, scudo::uptr *mapped,
+                       scudo::uptr *guard_page_size) {
+  AllocatorInfoType<Config> Info;
+
+  scudo::uptr Stats[scudo::StatCount] = {};
+  Info.GlobalStats.get(Stats);
+  *mapped = Stats[scudo::StatMapped];
+  Stats[scudo::StatMapped] = 0;
+
+  // Make sure the allocation is aligned to a page boundary so that the checks
+  // in the tests can avoid problems due to allocations having different
+  // alignments.
+  void *Ptr = Info.Allocator->allocate(Info.Options, Size, PageSize);
+  EXPECT_NE(Ptr, nullptr);
+
+  Info.GlobalStats.get(Stats);
+  EXPECT_GE(Stats[scudo::StatMapped], *mapped);
+  *mapped = Stats[scudo::StatMapped] - *mapped;
+
+  Info.Allocator->deallocate(Info.Options, Ptr);
+
+  *guard_page_size = Info.Allocator->getGuardPageSize();
+}
+
+TEST(ScudoSecondaryTest, VerifyGuardPageOption) {
+  static scudo::uptr AllocSize = 1000 * PageSize;
+
+  // Verify that a config with guard pages enabled:
+  //  - Non-zero sized guard page
+  //  - Mapped in at least the size of the allocation plus 2 * guard page size
+  scudo::uptr guard_mapped = 0;
+  scudo::uptr guard_page_size = 0;
+  testGetMappedSize<TestNoCacheConfig>(AllocSize, &guard_mapped,
+                                       &guard_page_size);
+  EXPECT_GT(guard_page_size, 0U);
+  EXPECT_GE(guard_mapped, AllocSize + 2 * guard_page_size);
+
+  // Verify that a config with guard pages disabled:
+  //  - Zero sized guard page
+  //  - The total mapped in is greater than the allocation size
+  scudo::uptr no_guard_mapped = 0;
+  scudo::uptr no_guard_page_size = 0;
+  testGetMappedSize<TestNoCacheNoGuardPageConfig>(AllocSize, &no_guard_mapped,
+                                                  &no_guard_page_size);
+  EXPECT_EQ(no_guard_page_size, 0U);
+  EXPECT_GE(no_guard_mapped, AllocSize);
+
+  // Verify that a guard page config mapped in at least twice the size of
+  // their guard page when compared to a no guard page config.
+  EXPECT_GE(guard_mapped, no_guard_mapped + guard_page_size * 2);
+}
+
 // Value written to cache entries that are unmapped.
 static scudo::u32 UnmappedMarker = 0xDEADBEEF;
 

