[compiler-rt] 2f08a08 - [scudo] Support importing custom configuration

Chia-hung Duan via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 2 09:30:35 PDT 2023


Author: Chia-hung Duan
Date: 2023-06-02T16:28:00Z
New Revision: 2f08a08f6102898c862802401c36fe4d33f590ef

URL: https://github.com/llvm/llvm-project/commit/2f08a08f6102898c862802401c36fe4d33f590ef
DIFF: https://github.com/llvm/llvm-project/commit/2f08a08f6102898c862802401c36fe4d33f590ef.diff

LOG: [scudo] Support importing custom configuration

To define a custom allocator configuration, you only need to put the
configuration in custom_scudo_config.h and define two required aliases;
then the build will switch to the customized config and the tests will
also run with your configuration.

In this CL, we also do a minor refactor of the structure of the
configuration. Now the essential fields are put under the associated
hierarchy, which will make defining a new configuration easier.

Reviewed By: cferris

Differential Revision: https://reviews.llvm.org/D150481

Added: 
    

Modified: 
    compiler-rt/lib/scudo/standalone/allocator_config.h
    compiler-rt/lib/scudo/standalone/combined.h
    compiler-rt/lib/scudo/standalone/primary32.h
    compiler-rt/lib/scudo/standalone/primary64.h
    compiler-rt/lib/scudo/standalone/secondary.h
    compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
    compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
    compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp

Removed: 
    


################################################################################
diff  --git a/compiler-rt/lib/scudo/standalone/allocator_config.h b/compiler-rt/lib/scudo/standalone/allocator_config.h
index d06f6dfe4e0a9..df972d1ea5d22 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -19,6 +19,22 @@
 #include "tsd_exclusive.h"
 #include "tsd_shared.h"
 
+// To import a custom configuration, define `SCUDO_USE_CUSTOM_CONFIG` and
+// aliasing the `Config` like:
+//
+// namespace scudo {
+//   // The instance of Scudo will be initiated with `Config`.
+//   typedef CustomConfig Config;
+//   // Aliasing as default configuration to run the tests with this config.
+//   typedef CustomConfig DefaultConfig;
+// } // namespace scudo
+//
+// Put them in the header `custom_scudo_config.h` then you will be using the
+// custom configuration and able to run all the tests as well.
+#ifdef SCUDO_USE_CUSTOM_CONFIG
+#include "custom_scudo_config.h"
+#endif
+
 namespace scudo {
 
 // The combined allocator uses a structure as a template argument that
@@ -26,191 +42,265 @@ namespace scudo {
 // allocator.
 //
 // struct ExampleConfig {
-//   // SizeClassMap to use with the Primary.
-//   using SizeClassMap = DefaultSizeClassMap;
 //   // Indicates possible support for Memory Tagging.
 //   static const bool MaySupportMemoryTagging = false;
-//   // Defines the Primary allocator to use.
-//   typedef SizeClassAllocator64<ExampleConfig> Primary;
-//   // Log2 of the size of a size class region, as used by the Primary.
-//   static const uptr PrimaryRegionSizeLog = 30U;
-//   // Log2 of the size of block group, as used by the Primary. Each group
-//   // contains a range of memory addresses, blocks in the range will belong to
-//   // the same group. In general, single region may have 1 or 2MB group size.
-//   // Multiple regions will have the group size equal to the region size
-//   // because the region size is usually smaller than 1 MB.
-//   // Smaller value gives fine-grained control of memory usage but the trade
-//   // off is that it may take longer time of deallocation.
-//   static const uptr PrimaryGroupSizeLog = 20U;
-//   // Defines the type and scale of a compact pointer. A compact pointer can
-//   // be understood as the offset of a pointer within the region it belongs
-//   // to, in increments of a power-of-2 scale.
-//   // eg: Ptr = Base + (CompactPtr << Scale).
-//   typedef u32 PrimaryCompactPtrT;
-//   static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
-//   // Indicates support for offsetting the start of a region by
-//   // a random number of pages. Only used with primary64.
-//   static const bool PrimaryEnableRandomOffset = true;
-//   // Call map for user memory with at least this size. Only used with
-//   // primary64.
-//   static const uptr PrimaryMapSizeIncrement = 1UL << 18;
-//   // Defines the minimal & maximal release interval that can be set.
-//   static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
-//   static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
-//   // Defines the type of cache used by the Secondary. Some additional
-//   // configuration entries can be necessary depending on the Cache.
-//   typedef MapAllocatorNoCache SecondaryCache;
+//
 //   // Thread-Specific Data Registry used, shared or exclusive.
 //   template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>;
+//
+//   struct Primary {
+//     // SizeClassMap to use with the Primary.
+//     using SizeClassMap = DefaultSizeClassMap;
+//
+//     // Log2 of the size of a size class region, as used by the Primary.
+//     static const uptr RegionSizeLog = 30U;
+//
+//     // Log2 of the size of block group, as used by the Primary. Each group
+//     // contains a range of memory addresses, blocks in the range will belong
+//     // to the same group. In general, single region may have 1 or 2MB group
+//     // size. Multiple regions will have the group size equal to the region
+//     // size because the region size is usually smaller than 1 MB.
+//     // Smaller value gives fine-grained control of memory usage but the
+//     // trade-off is that it may take longer time of deallocation.
+//     static const uptr GroupSizeLog = 20U;
+//
+//     // Defines the type and scale of a compact pointer. A compact pointer can
+//     // be understood as the offset of a pointer within the region it belongs
+//     // to, in increments of a power-of-2 scale.
+//     // eg: Ptr = Base + (CompactPtr << Scale).
+//     typedef u32 CompactPtrT;
+//     static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+//
+//     // Indicates support for offsetting the start of a region by
+//     // a random number of pages. Only used with primary64.
+//     static const bool EnableRandomOffset = true;
+//
+//     // Call map for user memory with at least this size. Only used with
+//     // primary64.
+//     static const uptr MapSizeIncrement = 1UL << 18;
+//
+//     // Defines the minimal & maximal release interval that can be set.
+//     static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+//     static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+//   };
+//   // Defines the type of Primary allocator to use.
+//   template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+//
+//   // Defines the type of cache used by the Secondary. Some additional
+//   // configuration entries can be necessary depending on the Cache.
+//   struct Secondary {
+//     struct Cache {
+//       static const u32 EntriesArraySize = 32U;
+//       static const u32 QuarantineSize = 0U;
+//       static const u32 DefaultMaxEntriesCount = 32U;
+//       static const uptr DefaultMaxEntrySize = 1UL << 19;
+//       static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+//       static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+//     };
+//     // Defines the type of Secondary Cache to use.
+//     template <typename Config> using CacheT = MapAllocatorCache<Config>;
+//   };
+//   // Defines the type of Secondary allocator to use.
+//   template <typename Config> using SecondaryT = MapAllocator<Config>;
 // };
 
-// Default configurations for various platforms.
+#ifndef SCUDO_USE_CUSTOM_CONFIG
 
+// Default configurations for various platforms. Note this is only enabled when
+// there's no custom configuration in the build system.
 struct DefaultConfig {
-  using SizeClassMap = DefaultSizeClassMap;
   static const bool MaySupportMemoryTagging = true;
+  template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
 
+  struct Primary {
+    using SizeClassMap = DefaultSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+    static const uptr RegionSizeLog = 32U;
+    static const uptr GroupSizeLog = 21U;
+    typedef uptr CompactPtrT;
+    static const uptr CompactPtrScale = 0;
+    static const bool EnableRandomOffset = true;
+    static const uptr MapSizeIncrement = 1UL << 18;
+#else
+    static const uptr RegionSizeLog = 19U;
+    static const uptr GroupSizeLog = 19U;
+    typedef uptr CompactPtrT;
+#endif
+    static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+  };
 #if SCUDO_CAN_USE_PRIMARY64
-  typedef SizeClassAllocator64<DefaultConfig> Primary;
-  static const uptr PrimaryRegionSizeLog = 32U;
-  static const uptr PrimaryGroupSizeLog = 21U;
-  typedef uptr PrimaryCompactPtrT;
-  static const uptr PrimaryCompactPtrScale = 0;
-  static const bool PrimaryEnableRandomOffset = true;
-  static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
 #else
-  typedef SizeClassAllocator32<DefaultConfig> Primary;
-  static const uptr PrimaryRegionSizeLog = 19U;
-  static const uptr PrimaryGroupSizeLog = 19U;
-  typedef uptr PrimaryCompactPtrT;
+  template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
 #endif
-  static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
-  static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
 
-  typedef MapAllocatorCache<DefaultConfig> SecondaryCache;
-  static const u32 SecondaryCacheEntriesArraySize = 32U;
-  static const u32 SecondaryCacheQuarantineSize = 0U;
-  static const u32 SecondaryCacheDefaultMaxEntriesCount = 32U;
-  static const uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 19;
-  static const s32 SecondaryCacheMinReleaseToOsIntervalMs = INT32_MIN;
-  static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = INT32_MAX;
+  struct Secondary {
+    struct Cache {
+      static const u32 EntriesArraySize = 32U;
+      static const u32 QuarantineSize = 0U;
+      static const u32 DefaultMaxEntriesCount = 32U;
+      static const uptr DefaultMaxEntrySize = 1UL << 19;
+      static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+      static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    };
+    template <typename Config> using CacheT = MapAllocatorCache<Config>;
+  };
 
-  template <class A> using TSDRegistryT = TSDRegistryExT<A>; // Exclusive
+  template <typename Config> using SecondaryT = MapAllocator<Config>;
 };
+
+#endif // SCUDO_USE_CUSTOM_CONFIG
+
 struct AndroidConfig {
-  using SizeClassMap = AndroidSizeClassMap;
   static const bool MaySupportMemoryTagging = true;
+  template <class A>
+  using TSDRegistryT = TSDRegistrySharedT<A, 8U, 2U>; // Shared, max 8 TSDs.
 
+  struct Primary {
+    using SizeClassMap = AndroidSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+    static const uptr RegionSizeLog = 28U;
+    typedef u32 CompactPtrT;
+    static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+    static const uptr GroupSizeLog = 20U;
+    static const bool EnableRandomOffset = true;
+    static const uptr MapSizeIncrement = 1UL << 18;
+#else
+    static const uptr RegionSizeLog = 18U;
+    static const uptr GroupSizeLog = 18U;
+    typedef uptr CompactPtrT;
+#endif
+    static const s32 MinReleaseToOsIntervalMs = 1000;
+    static const s32 MaxReleaseToOsIntervalMs = 1000;
+  };
 #if SCUDO_CAN_USE_PRIMARY64
-  typedef SizeClassAllocator64<AndroidConfig> Primary;
-  static const uptr PrimaryRegionSizeLog = 28U;
-  typedef u32 PrimaryCompactPtrT;
-  static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
-  static const uptr PrimaryGroupSizeLog = 20U;
-  static const bool PrimaryEnableRandomOffset = true;
-  static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
 #else
-  typedef SizeClassAllocator32<AndroidConfig> Primary;
-  static const uptr PrimaryRegionSizeLog = 18U;
-  static const uptr PrimaryGroupSizeLog = 18U;
-  typedef uptr PrimaryCompactPtrT;
+  template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
 #endif
-  static const s32 PrimaryMinReleaseToOsIntervalMs = 1000;
-  static const s32 PrimaryMaxReleaseToOsIntervalMs = 1000;
 
-  typedef MapAllocatorCache<AndroidConfig> SecondaryCache;
-  static const u32 SecondaryCacheEntriesArraySize = 256U;
-  static const u32 SecondaryCacheQuarantineSize = 32U;
-  static const u32 SecondaryCacheDefaultMaxEntriesCount = 32U;
-  static const uptr SecondaryCacheDefaultMaxEntrySize = 2UL << 20;
-  static const s32 SecondaryCacheMinReleaseToOsIntervalMs = 0;
-  static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = 1000;
+  struct Secondary {
+    struct Cache {
+      static const u32 EntriesArraySize = 256U;
+      static const u32 QuarantineSize = 32U;
+      static const u32 DefaultMaxEntriesCount = 32U;
+      static const uptr DefaultMaxEntrySize = 2UL << 20;
+      static const s32 MinReleaseToOsIntervalMs = 0;
+      static const s32 MaxReleaseToOsIntervalMs = 1000;
+    };
+    template <typename Config> using CacheT = MapAllocatorCache<Config>;
+  };
 
-  template <class A>
-  using TSDRegistryT = TSDRegistrySharedT<A, 8U, 2U>; // Shared, max 8 TSDs.
+  template <typename Config> using SecondaryT = MapAllocator<Config>;
 };
 
 struct AndroidSvelteConfig {
-  using SizeClassMap = SvelteSizeClassMap;
   static const bool MaySupportMemoryTagging = false;
+  template <class A>
+  using TSDRegistryT = TSDRegistrySharedT<A, 2U, 1U>; // Shared, max 2 TSDs.
 
+  struct Primary {
+    using SizeClassMap = SvelteSizeClassMap;
 #if SCUDO_CAN_USE_PRIMARY64
-  typedef SizeClassAllocator64<AndroidSvelteConfig> Primary;
-  static const uptr PrimaryRegionSizeLog = 27U;
-  typedef u32 PrimaryCompactPtrT;
-  static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
-  static const uptr PrimaryGroupSizeLog = 18U;
-  static const bool PrimaryEnableRandomOffset = true;
-  static const uptr PrimaryMapSizeIncrement = 1UL << 18;
+    static const uptr RegionSizeLog = 27U;
+    typedef u32 CompactPtrT;
+    static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+    static const uptr GroupSizeLog = 18U;
+    static const bool EnableRandomOffset = true;
+    static const uptr MapSizeIncrement = 1UL << 18;
 #else
-  typedef SizeClassAllocator32<AndroidSvelteConfig> Primary;
-  static const uptr PrimaryRegionSizeLog = 16U;
-  static const uptr PrimaryGroupSizeLog = 16U;
-  typedef uptr PrimaryCompactPtrT;
+    static const uptr RegionSizeLog = 16U;
+    static const uptr GroupSizeLog = 16U;
+    typedef uptr CompactPtrT;
 #endif
-  static const s32 PrimaryMinReleaseToOsIntervalMs = 1000;
-  static const s32 PrimaryMaxReleaseToOsIntervalMs = 1000;
+    static const s32 MinReleaseToOsIntervalMs = 1000;
+    static const s32 MaxReleaseToOsIntervalMs = 1000;
+  };
 
-  typedef MapAllocatorCache<AndroidSvelteConfig> SecondaryCache;
-  static const u32 SecondaryCacheEntriesArraySize = 16U;
-  static const u32 SecondaryCacheQuarantineSize = 32U;
-  static const u32 SecondaryCacheDefaultMaxEntriesCount = 4U;
-  static const uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 18;
-  static const s32 SecondaryCacheMinReleaseToOsIntervalMs = 0;
-  static const s32 SecondaryCacheMaxReleaseToOsIntervalMs = 0;
+#if SCUDO_CAN_USE_PRIMARY64
+  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+#else
+  template <typename Config> using PrimaryT = SizeClassAllocator32<Config>;
+#endif
 
-  template <class A>
-  using TSDRegistryT = TSDRegistrySharedT<A, 2U, 1U>; // Shared, max 2 TSDs.
+  struct Secondary {
+    struct Cache {
+      static const u32 EntriesArraySize = 16U;
+      static const u32 QuarantineSize = 32U;
+      static const u32 DefaultMaxEntriesCount = 4U;
+      static const uptr DefaultMaxEntrySize = 1UL << 18;
+      static const s32 MinReleaseToOsIntervalMs = 0;
+      static const s32 MaxReleaseToOsIntervalMs = 0;
+    };
+    template <typename Config> using CacheT = MapAllocatorCache<Config>;
+  };
+
+  template <typename Config> using SecondaryT = MapAllocator<Config>;
 };
 
 #if SCUDO_CAN_USE_PRIMARY64
 struct FuchsiaConfig {
-  using SizeClassMap = FuchsiaSizeClassMap;
   static const bool MaySupportMemoryTagging = false;
+  template <class A>
+  using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
 
-  typedef SizeClassAllocator64<FuchsiaConfig> Primary;
-// Support 39-bit VMA for riscv-64
+  struct Primary {
+    using SizeClassMap = FuchsiaSizeClassMap;
 #if SCUDO_RISCV64
-  static const uptr PrimaryRegionSizeLog = 28U;
-  static const uptr PrimaryGroupSizeLog = 19U;
+    // Support 39-bit VMA for riscv-64
+    static const uptr RegionSizeLog = 28U;
+    static const uptr GroupSizeLog = 19U;
 #else
-  static const uptr PrimaryRegionSizeLog = 30U;
-  static const uptr PrimaryGroupSizeLog = 21U;
+    static const uptr RegionSizeLog = 30U;
+    static const uptr GroupSizeLog = 21U;
 #endif
-  typedef u32 PrimaryCompactPtrT;
-  static const bool PrimaryEnableRandomOffset = true;
-  static const uptr PrimaryMapSizeIncrement = 1UL << 18;
-  static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
-  static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
-  static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
-
-  typedef MapAllocatorNoCache SecondaryCache;
-  template <class A>
-  using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
+    typedef u32 CompactPtrT;
+    static const bool EnableRandomOffset = true;
+    static const uptr MapSizeIncrement = 1UL << 18;
+    static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+    static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+  };
+  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+
+  struct Secondary {
+    template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
+  };
+  template <typename Config> using SecondaryT = MapAllocator<Config>;
 };
 
 struct TrustyConfig {
-  using SizeClassMap = TrustySizeClassMap;
   static const bool MaySupportMemoryTagging = false;
-
-  typedef SizeClassAllocator64<TrustyConfig> Primary;
-  // Some apps have 1 page of heap total so small regions are necessary.
-  static const uptr PrimaryRegionSizeLog = 10U;
-  static const uptr PrimaryGroupSizeLog = 10U;
-  typedef u32 PrimaryCompactPtrT;
-  static const bool PrimaryEnableRandomOffset = false;
-  // Trusty is extremely memory-constrained so minimally round up map calls.
-  static const uptr PrimaryMapSizeIncrement = 1UL << 4;
-  static const uptr PrimaryCompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
-  static const s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
-  static const s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
-
-  typedef MapAllocatorNoCache SecondaryCache;
   template <class A>
   using TSDRegistryT = TSDRegistrySharedT<A, 1U, 1U>; // Shared, max 1 TSD.
+
+  struct Primary {
+    static const bool MaySupportMemoryTagging = false;
+    using SizeClassMap = TrustySizeClassMap;
+    // Some apps have 1 page of heap total so small regions are necessary.
+    static const uptr RegionSizeLog = 10U;
+    static const uptr GroupSizeLog = 10U;
+    typedef u32 CompactPtrT;
+    static const bool EnableRandomOffset = false;
+    // Trusty is extremely memory-constrained so minimally round up map calls.
+    static const uptr MapSizeIncrement = 1UL << 4;
+    static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+    static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+  };
+  template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
+
+  struct Secondary {
+    template <typename Config> using CacheT = MapAllocatorNoCache<Config>;
+  };
+
+  template <typename Config> using SecondaryT = MapAllocator<Config>;
 };
 #endif
 
+#ifndef SCUDO_USE_CUSTOM_CONFIG
+
 #if SCUDO_ANDROID
 typedef AndroidConfig Config;
 #elif SCUDO_FUCHSIA
@@ -221,6 +311,8 @@ typedef TrustyConfig Config;
 typedef DefaultConfig Config;
 #endif
 
+#endif // SCUDO_USE_CUSTOM_CONFIG
+
 } // namespace scudo
 
 #endif // SCUDO_ALLOCATOR_CONFIG_H_

diff  --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 52e2674400fb7..e3ec72d4ea1e1 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -43,13 +43,14 @@ extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
 
 namespace scudo {
 
-template <class Params, void (*PostInitCallback)(void) = EmptyCallback>
+template <class Config, void (*PostInitCallback)(void) = EmptyCallback>
 class Allocator {
 public:
-  using PrimaryT = typename Params::Primary;
+  using PrimaryT = typename Config::template PrimaryT<Config>;
+  using SecondaryT = typename Config::template SecondaryT<Config>;
   using CacheT = typename PrimaryT::CacheT;
-  typedef Allocator<Params, PostInitCallback> ThisT;
-  typedef typename Params::template TSDRegistryT<ThisT> TSDRegistryT;
+  typedef Allocator<Config, PostInitCallback> ThisT;
+  typedef typename Config::template TSDRegistryT<ThisT> TSDRegistryT;
 
   void callPostInitCallback() {
     pthread_once(&PostInitNonce, PostInitCallback);
@@ -71,7 +72,7 @@ class Allocator {
       NewHeader.State = Chunk::State::Available;
       Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
 
-      if (allocatorSupportsMemoryTagging<Params>())
+      if (allocatorSupportsMemoryTagging<Config>())
         Ptr = untagPointer(Ptr);
       void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
       Cache.deallocate(NewHeader.ClassId, BlockBegin);
@@ -98,7 +99,7 @@ class Allocator {
 
       // Reset tag to 0 as this chunk may have been previously used for a tagged
       // user allocation.
-      if (UNLIKELY(useMemoryTagging<Params>(Allocator.Primary.Options.load())))
+      if (UNLIKELY(useMemoryTagging<Config>(Allocator.Primary.Options.load())))
         storeTags(reinterpret_cast<uptr>(Ptr),
                   reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
 
@@ -162,7 +163,7 @@ class Allocator {
       Primary.Options.set(OptionBit::DeallocTypeMismatch);
     if (getFlags()->delete_size_mismatch)
       Primary.Options.set(OptionBit::DeleteSizeMismatch);
-    if (allocatorSupportsMemoryTagging<Params>() &&
+    if (allocatorSupportsMemoryTagging<Config>() &&
         systemSupportsMemoryTagging())
       Primary.Options.set(OptionBit::UseMemoryTagging);
     Primary.Options.set(OptionBit::UseOddEvenTags);
@@ -264,7 +265,7 @@ class Allocator {
   void drainCaches() { TSDRegistry.drainCaches(this); }
 
   ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
-    if (!allocatorSupportsMemoryTagging<Params>())
+    if (!allocatorSupportsMemoryTagging<Config>())
       return Ptr;
     auto UntaggedPtr = untagPointer(Ptr);
     if (UntaggedPtr != Ptr)
@@ -276,7 +277,7 @@ class Allocator {
   }
 
   ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
-    if (!allocatorSupportsMemoryTagging<Params>())
+    if (!allocatorSupportsMemoryTagging<Config>())
       return Ptr;
     return addFixedTag(Ptr, 2);
   }
@@ -427,7 +428,7 @@ class Allocator {
       //
       // When memory tagging is enabled, zeroing the contents is done as part of
       // setting the tag.
-      if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+      if (UNLIKELY(useMemoryTagging<Config>(Options))) {
         uptr PrevUserPtr;
         Chunk::UnpackedHeader Header;
         const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
@@ -509,7 +510,7 @@ class Allocator {
     } else {
       Block = addHeaderTag(Block);
       Ptr = addHeaderTag(Ptr);
-      if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+      if (UNLIKELY(useMemoryTagging<Config>(Options))) {
         storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
         storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
       }
@@ -676,7 +677,7 @@ class Allocator {
                            (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
             Chunk::SizeOrUnusedBytesMask;
         Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
-        if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+        if (UNLIKELY(useMemoryTagging<Config>(Options))) {
           if (ClassId) {
             resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
                               reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
@@ -772,7 +773,7 @@ class Allocator {
       Base = untagPointer(Base);
     const uptr From = Base;
     const uptr To = Base + Size;
-    bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Params>() &&
+    bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Config>() &&
                                 systemSupportsMemoryTagging();
     auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
                    Arg](uptr Block) {
@@ -794,9 +795,9 @@ class Allocator {
       }
       if (Header.State == Chunk::State::Allocated) {
         uptr TaggedChunk = Chunk;
-        if (allocatorSupportsMemoryTagging<Params>())
+        if (allocatorSupportsMemoryTagging<Config>())
           TaggedChunk = untagPointer(TaggedChunk);
-        if (useMemoryTagging<Params>(Primary.Options.load()))
+        if (useMemoryTagging<Config>(Primary.Options.load()))
           TaggedChunk = loadTag(Chunk);
         Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
                  Arg);
@@ -895,7 +896,7 @@ class Allocator {
   }
 
   bool useMemoryTaggingTestOnly() const {
-    return useMemoryTagging<Params>(Primary.Options.load());
+    return useMemoryTagging<Config>(Primary.Options.load());
   }
   void disableMemoryTagging() {
     // If we haven't been initialized yet, we need to initialize now in order to
@@ -905,7 +906,7 @@ class Allocator {
     // callback), which may cause mappings to be created with memory tagging
     // enabled.
     TSDRegistry.initOnceMaybe(this);
-    if (allocatorSupportsMemoryTagging<Params>()) {
+    if (allocatorSupportsMemoryTagging<Config>()) {
       Secondary.disableMemoryTagging();
       Primary.Options.clear(OptionBit::UseMemoryTagging);
     }
@@ -989,7 +990,7 @@ class Allocator {
                            const char *Memory, const char *MemoryTags,
                            uintptr_t MemoryAddr, size_t MemorySize) {
     *ErrorInfo = {};
-    if (!allocatorSupportsMemoryTagging<Params>() ||
+    if (!allocatorSupportsMemoryTagging<Config>() ||
         MemoryAddr + MemorySize < MemoryAddr)
       return;
 
@@ -1017,7 +1018,6 @@ class Allocator {
   }
 
 private:
-  using SecondaryT = MapAllocator<Params>;
   typedef typename PrimaryT::SizeClassMap SizeClassMap;
 
   static const uptr MinAlignmentLog = SCUDO_MIN_ALIGNMENT_LOG;
@@ -1029,7 +1029,7 @@ class Allocator {
 
   static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
                 "Minimal alignment must at least cover a chunk header.");
-  static_assert(!allocatorSupportsMemoryTagging<Params>() ||
+  static_assert(!allocatorSupportsMemoryTagging<Config>() ||
                     MinAlignment >= archMemoryTagGranuleSize(),
                 "");
 
@@ -1129,7 +1129,7 @@ class Allocator {
     const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
     if (LIKELY(Header->ClassId))
       return SizeOrUnusedBytes;
-    if (allocatorSupportsMemoryTagging<Params>())
+    if (allocatorSupportsMemoryTagging<Config>())
       Ptr = untagPointer(const_cast<void *>(Ptr));
     return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
            reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
@@ -1150,12 +1150,12 @@ class Allocator {
       NewHeader.State = Chunk::State::Available;
     else
       NewHeader.State = Chunk::State::Quarantined;
-    NewHeader.OriginOrWasZeroed = useMemoryTagging<Params>(Options) &&
+    NewHeader.OriginOrWasZeroed = useMemoryTagging<Config>(Options) &&
                                   NewHeader.ClassId &&
                                   !TSDRegistry.getDisableMemInit();
     Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
 
-    if (UNLIKELY(useMemoryTagging<Params>(Options))) {
+    if (UNLIKELY(useMemoryTagging<Config>(Options))) {
       u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
       storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
       if (NewHeader.ClassId) {
@@ -1172,7 +1172,7 @@ class Allocator {
       }
     }
     if (BypassQuarantine) {
-      if (allocatorSupportsMemoryTagging<Params>())
+      if (allocatorSupportsMemoryTagging<Config>())
         Ptr = untagPointer(Ptr);
       void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
       const uptr ClassId = NewHeader.ClassId;
@@ -1183,7 +1183,7 @@ class Allocator {
         if (UnlockRequired)
           TSD->unlock();
       } else {
-        if (UNLIKELY(useMemoryTagging<Params>(Options)))
+        if (UNLIKELY(useMemoryTagging<Config>(Options)))
           storeTags(reinterpret_cast<uptr>(BlockBegin),
                     reinterpret_cast<uptr>(Ptr));
         Secondary.deallocate(Options, BlockBegin);

diff  --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 6d306effe34ee..f67a0498ada87 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -42,13 +42,14 @@ namespace scudo {
 
 template <typename Config> class SizeClassAllocator32 {
 public:
-  typedef typename Config::PrimaryCompactPtrT CompactPtrT;
-  typedef typename Config::SizeClassMap SizeClassMap;
-  static const uptr GroupSizeLog = Config::PrimaryGroupSizeLog;
+  typedef typename Config::Primary::CompactPtrT CompactPtrT;
+  typedef typename Config::Primary::SizeClassMap SizeClassMap;
+  static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
   // The bytemap can only track UINT8_MAX - 1 classes.
   static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
   // Regions should be large enough to hold the largest Block.
-  static_assert((1UL << Config::PrimaryRegionSizeLog) >= SizeClassMap::MaxSize,
+  static_assert((1UL << Config::Primary::RegionSizeLog) >=
+                    SizeClassMap::MaxSize,
                 "");
   typedef SizeClassAllocator32<Config> ThisT;
   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
@@ -281,9 +282,9 @@ template <typename Config> class SizeClassAllocator32 {
 
   bool setOption(Option O, sptr Value) {
     if (O == Option::ReleaseInterval) {
-      const s32 Interval = Max(
-          Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
-          Config::PrimaryMinReleaseToOsIntervalMs);
+      const s32 Interval = Max(Min(static_cast<s32>(Value),
+                                   Config::Primary::MaxReleaseToOsIntervalMs),
+                               Config::Primary::MinReleaseToOsIntervalMs);
       atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
       return true;
     }
@@ -315,9 +316,9 @@ template <typename Config> class SizeClassAllocator32 {
 
 private:
   static const uptr NumClasses = SizeClassMap::NumClasses;
-  static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
+  static const uptr RegionSize = 1UL << Config::Primary::RegionSizeLog;
   static const uptr NumRegions =
-      SCUDO_MMAP_RANGE_SIZE >> Config::PrimaryRegionSizeLog;
+      SCUDO_MMAP_RANGE_SIZE >> Config::Primary::RegionSizeLog;
   static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
   typedef FlatByteMap<NumRegions> ByteMap;
 
@@ -350,7 +351,7 @@ template <typename Config> class SizeClassAllocator32 {
   static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
 
   uptr computeRegionId(uptr Mem) {
-    const uptr Id = Mem >> Config::PrimaryRegionSizeLog;
+    const uptr Id = Mem >> Config::Primary::RegionSizeLog;
     CHECK_LT(Id, NumRegions);
     return Id;
   }
@@ -379,7 +380,7 @@ template <typename Config> class SizeClassAllocator32 {
       unmap(reinterpret_cast<void *>(End), MapEnd - End);
 
     DCHECK_EQ(Region % RegionSize, 0U);
-    static_assert(Config::PrimaryRegionSizeLog == GroupSizeLog,
+    static_assert(Config::Primary::RegionSizeLog == GroupSizeLog,
                   "Memory group should be the same size as Region");
 
     return Region;

diff  --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 2c76c5f6e5c69..209dfc90c65b7 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -45,11 +45,11 @@ namespace scudo {
 
 template <typename Config> class SizeClassAllocator64 {
 public:
-  typedef typename Config::PrimaryCompactPtrT CompactPtrT;
-  static const uptr CompactPtrScale = Config::PrimaryCompactPtrScale;
-  static const uptr GroupSizeLog = Config::PrimaryGroupSizeLog;
+  typedef typename Config::Primary::CompactPtrT CompactPtrT;
+  typedef typename Config::Primary::SizeClassMap SizeClassMap;
+  static const uptr CompactPtrScale = Config::Primary::CompactPtrScale;
+  static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
   static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
-  typedef typename Config::SizeClassMap SizeClassMap;
   typedef SizeClassAllocator64<Config> ThisT;
   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
   typedef typename CacheT::TransferBatch TransferBatch;
@@ -119,10 +119,11 @@ template <typename Config> class SizeClassAllocator64 {
       RegionInfo *Region = getRegionInfo(I);
       // The actual start of a region is offset by a random number of pages
       // when PrimaryEnableRandomOffset is set.
-      Region->RegionBeg = (PrimaryBase + (I << Config::PrimaryRegionSizeLog)) +
-                          (Config::PrimaryEnableRandomOffset
-                               ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
-                               : 0);
+      Region->RegionBeg =
+          (PrimaryBase + (I << Config::Primary::RegionSizeLog)) +
+          (Config::Primary::EnableRandomOffset
+               ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
+               : 0);
       Region->RandState = getRandomU32(&Seed);
       // Releasing small blocks is expensive, set a higher threshold to avoid
       // frequent page releases.
@@ -322,9 +323,9 @@ template <typename Config> class SizeClassAllocator64 {
 
   bool setOption(Option O, sptr Value) {
     if (O == Option::ReleaseInterval) {
-      const s32 Interval = Max(
-          Min(static_cast<s32>(Value), Config::PrimaryMaxReleaseToOsIntervalMs),
-          Config::PrimaryMinReleaseToOsIntervalMs);
+      const s32 Interval = Max(Min(static_cast<s32>(Value),
+                                   Config::Primary::MaxReleaseToOsIntervalMs),
+                               Config::Primary::MinReleaseToOsIntervalMs);
       atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
       return true;
     }
@@ -420,11 +421,11 @@ template <typename Config> class SizeClassAllocator64 {
   AtomicOptions Options;
 
 private:
-  static const uptr RegionSize = 1UL << Config::PrimaryRegionSizeLog;
+  static const uptr RegionSize = 1UL << Config::Primary::RegionSizeLog;
   static const uptr NumClasses = SizeClassMap::NumClasses;
   static const uptr PrimarySize = RegionSize * NumClasses;
 
-  static const uptr MapSizeIncrement = Config::PrimaryMapSizeIncrement;
+  static const uptr MapSizeIncrement = Config::Primary::MapSizeIncrement;
   // Fill at most this number of batches from the newly map'd memory.
   static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
 

diff  --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 94009f5fa9c65..2304b51b13654 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -72,7 +72,7 @@ static void unmap(LargeBlock::Header *H) {
   MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
 }
 
-class MapAllocatorNoCache {
+template <typename Config> class MapAllocatorNoCache {
 public:
   void init(UNUSED s32 ReleaseToOsInterval) {}
   bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
@@ -130,17 +130,18 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 
 template <typename Config> class MapAllocatorCache {
 public:
+  using CacheConfig = typename Config::Secondary::Cache;
   // Ensure the default maximum specified fits the array.
-  static_assert(Config::SecondaryCacheDefaultMaxEntriesCount <=
-                    Config::SecondaryCacheEntriesArraySize,
+  static_assert(CacheConfig::DefaultMaxEntriesCount <=
+                    CacheConfig::EntriesArraySize,
                 "");
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
     DCHECK_EQ(EntriesCount, 0U);
     setOption(Option::MaxCacheEntriesCount,
-              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntriesCount));
+              static_cast<sptr>(CacheConfig::DefaultMaxEntriesCount));
     setOption(Option::MaxCacheEntrySize,
-              static_cast<sptr>(Config::SecondaryCacheDefaultMaxEntrySize));
+              static_cast<sptr>(CacheConfig::DefaultMaxEntrySize));
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
   }
 
@@ -185,10 +186,9 @@ template <typename Config> class MapAllocatorCache {
         // just unmap it.
         break;
       }
-      if (Config::SecondaryCacheQuarantineSize &&
-          useMemoryTagging<Config>(Options)) {
+      if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
         QuarantinePos =
-            (QuarantinePos + 1) % Max(Config::SecondaryCacheQuarantineSize, 1u);
+            (QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
         if (!Quarantine[QuarantinePos].CommitBase) {
           Quarantine[QuarantinePos] = Entry;
           return;
@@ -291,16 +291,15 @@ template <typename Config> class MapAllocatorCache {
 
   bool setOption(Option O, sptr Value) {
     if (O == Option::ReleaseInterval) {
-      const s32 Interval =
-          Max(Min(static_cast<s32>(Value),
-                  Config::SecondaryCacheMaxReleaseToOsIntervalMs),
-              Config::SecondaryCacheMinReleaseToOsIntervalMs);
+      const s32 Interval = Max(
+          Min(static_cast<s32>(Value), CacheConfig::MaxReleaseToOsIntervalMs),
+          CacheConfig::MinReleaseToOsIntervalMs);
       atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
       return true;
     }
     if (O == Option::MaxCacheEntriesCount) {
       const u32 MaxCount = static_cast<u32>(Value);
-      if (MaxCount > Config::SecondaryCacheEntriesArraySize)
+      if (MaxCount > CacheConfig::EntriesArraySize)
         return false;
       atomic_store_relaxed(&MaxEntriesCount, MaxCount);
       return true;
@@ -317,7 +316,7 @@ template <typename Config> class MapAllocatorCache {
 
   void disableMemoryTagging() EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    for (u32 I = 0; I != Config::SecondaryCacheQuarantineSize; ++I) {
+    for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
       if (Quarantine[I].CommitBase) {
         MemMapT &MemMap = Quarantine[I].MemMap;
         MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
@@ -342,11 +341,11 @@ template <typename Config> class MapAllocatorCache {
 
 private:
   void empty() {
-    MemMapT MapInfo[Config::SecondaryCacheEntriesArraySize];
+    MemMapT MapInfo[CacheConfig::EntriesArraySize];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++) {
+      for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
         if (!Entries[I].CommitBase)
           continue;
         MapInfo[N] = Entries[I].MemMap;
@@ -387,9 +386,9 @@ template <typename Config> class MapAllocatorCache {
     if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
       return;
     OldestTime = 0;
-    for (uptr I = 0; I < Config::SecondaryCacheQuarantineSize; I++)
+    for (uptr I = 0; I < CacheConfig::QuarantineSize; I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < Config::SecondaryCacheEntriesArraySize; I++)
+    for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++)
       releaseIfOlderThan(Entries[I], Time);
   }
 
@@ -402,9 +401,8 @@ template <typename Config> class MapAllocatorCache {
   u32 IsFullEvents GUARDED_BY(Mutex) = 0;
   atomic_s32 ReleaseToOsIntervalMs = {};
 
-  CachedBlock
-      Entries[Config::SecondaryCacheEntriesArraySize] GUARDED_BY(Mutex) = {};
-  NonZeroLengthArray<CachedBlock, Config::SecondaryCacheQuarantineSize>
+  CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
+  NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
       Quarantine GUARDED_BY(Mutex) = {};
 };
 
@@ -469,7 +467,7 @@ template <typename Config> class MapAllocator {
   void unmapTestOnly() { Cache.unmapTestOnly(); }
 
 private:
-  typename Config::SecondaryCache Cache;
+  typename Config::Secondary::template CacheT<Config> Cache;
 
   mutable HybridMutex Mutex;
   DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);

diff  --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 44ba639f7aa24..a4916d0ba027e 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -541,21 +541,29 @@ struct DeathSizeClassConfig {
 static const scudo::uptr DeathRegionSizeLog = 21U;
 struct DeathConfig {
   static const bool MaySupportMemoryTagging = false;
-
-  // Tiny allocator, its Primary only serves chunks of four sizes.
-  using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
-  typedef scudo::SizeClassAllocator64<DeathConfig> Primary;
-  static const scudo::uptr PrimaryRegionSizeLog = DeathRegionSizeLog;
-  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
-  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
-  typedef scudo::uptr PrimaryCompactPtrT;
-  static const scudo::uptr PrimaryCompactPtrScale = 0;
-  static const bool PrimaryEnableRandomOffset = true;
-  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
-  static const scudo::uptr PrimaryGroupSizeLog = 18;
-
-  typedef scudo::MapAllocatorNoCache SecondaryCache;
   template <class A> using TSDRegistryT = scudo::TSDRegistrySharedT<A, 1U, 1U>;
+
+  struct Primary {
+    // Tiny allocator, its Primary only serves chunks of four sizes.
+    using SizeClassMap = scudo::FixedSizeClassMap<DeathSizeClassConfig>;
+    static const scudo::uptr RegionSizeLog = DeathRegionSizeLog;
+    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    typedef scudo::uptr CompactPtrT;
+    static const scudo::uptr CompactPtrScale = 0;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+    static const scudo::uptr GroupSizeLog = 18;
+  };
+  template <typename Config>
+  using PrimaryT = scudo::SizeClassAllocator64<Config>;
+
+  struct Secondary {
+    template <typename Config>
+    using CacheT = scudo::MapAllocatorNoCache<Config>;
+  };
+
+  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
 };
 
 TEST(ScudoCombinedDeathTest, DeathCombined) {
@@ -600,13 +608,14 @@ TEST(ScudoCombinedTest, FullRegion) {
   std::vector<void *> V;
   scudo::uptr FailedAllocationsCount = 0;
   for (scudo::uptr ClassId = 1U;
-       ClassId <= DeathConfig::SizeClassMap::LargestClassId; ClassId++) {
+       ClassId <= DeathConfig::Primary::SizeClassMap::LargestClassId;
+       ClassId++) {
     const scudo::uptr Size =
-        DeathConfig::SizeClassMap::getSizeByClassId(ClassId);
+        DeathConfig::Primary::SizeClassMap::getSizeByClassId(ClassId);
     // Allocate enough to fill all of the regions above this one.
     const scudo::uptr MaxNumberOfChunks =
         ((1U << DeathRegionSizeLog) / Size) *
-        (DeathConfig::SizeClassMap::LargestClassId - ClassId + 1);
+        (DeathConfig::Primary::SizeClassMap::LargestClassId - ClassId + 1);
     void *P;
     for (scudo::uptr I = 0; I <= MaxNumberOfChunks; I++) {
       P = Allocator->allocate(Size - 64U, Origin);

diff  --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
index fbb0cfe28344b..fb67d448331f5 100644
--- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
@@ -25,82 +25,96 @@
 // 32-bit architectures. It's not something we want to encourage, but we still
 // should ensure the tests pass.
 
-struct TestConfig1 {
-  static const scudo::uptr PrimaryRegionSizeLog = 18U;
-  static const scudo::uptr PrimaryGroupSizeLog = 18U;
-  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
-  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
+template <typename SizeClassMapT> struct TestConfig1 {
   static const bool MaySupportMemoryTagging = false;
-  typedef scudo::uptr PrimaryCompactPtrT;
-  static const scudo::uptr PrimaryCompactPtrScale = 0;
-  static const bool PrimaryEnableRandomOffset = true;
-  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
+
+  struct Primary {
+    using SizeClassMap = SizeClassMapT;
+    static const scudo::uptr RegionSizeLog = 18U;
+    static const scudo::uptr GroupSizeLog = 18U;
+    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    typedef scudo::uptr CompactPtrT;
+    static const scudo::uptr CompactPtrScale = 0;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+  };
 };
 
-struct TestConfig2 {
+template <typename SizeClassMapT> struct TestConfig2 {
+  static const bool MaySupportMemoryTagging = false;
+
+  struct Primary {
+    using SizeClassMap = SizeClassMapT;
 #if defined(__mips__)
-  // Unable to allocate greater size on QEMU-user.
-  static const scudo::uptr PrimaryRegionSizeLog = 23U;
+    // Unable to allocate greater size on QEMU-user.
+    static const scudo::uptr RegionSizeLog = 23U;
 #else
-  static const scudo::uptr PrimaryRegionSizeLog = 24U;
+    static const scudo::uptr RegionSizeLog = 24U;
 #endif
-  static const scudo::uptr PrimaryGroupSizeLog = 20U;
-  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
-  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
-  static const bool MaySupportMemoryTagging = false;
-  typedef scudo::uptr PrimaryCompactPtrT;
-  static const scudo::uptr PrimaryCompactPtrScale = 0;
-  static const bool PrimaryEnableRandomOffset = true;
-  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
+    static const scudo::uptr GroupSizeLog = 20U;
+    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    typedef scudo::uptr CompactPtrT;
+    static const scudo::uptr CompactPtrScale = 0;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+  };
 };
 
-struct TestConfig3 {
+template <typename SizeClassMapT> struct TestConfig3 {
+  static const bool MaySupportMemoryTagging = true;
+
+  struct Primary {
+    using SizeClassMap = SizeClassMapT;
 #if defined(__mips__)
-  // Unable to allocate greater size on QEMU-user.
-  static const scudo::uptr PrimaryRegionSizeLog = 23U;
+    // Unable to allocate greater size on QEMU-user.
+    static const scudo::uptr RegionSizeLog = 23U;
 #else
-  static const scudo::uptr PrimaryRegionSizeLog = 24U;
+    static const scudo::uptr RegionSizeLog = 24U;
 #endif
-  static const scudo::uptr PrimaryGroupSizeLog = 20U;
-  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
-  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
-  static const bool MaySupportMemoryTagging = true;
-  typedef scudo::uptr PrimaryCompactPtrT;
-  static const scudo::uptr PrimaryCompactPtrScale = 0;
-  static const bool PrimaryEnableRandomOffset = true;
-  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
+    static const scudo::uptr GroupSizeLog = 20U;
+    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    typedef scudo::uptr CompactPtrT;
+    static const scudo::uptr CompactPtrScale = 0;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+  };
 };
 
-struct TestConfig4 {
+template <typename SizeClassMapT> struct TestConfig4 {
+  static const bool MaySupportMemoryTagging = true;
+
+  struct Primary {
+    using SizeClassMap = SizeClassMapT;
 #if defined(__mips__)
-  // Unable to allocate greater size on QEMU-user.
-  static const scudo::uptr PrimaryRegionSizeLog = 23U;
+    // Unable to allocate greater size on QEMU-user.
+    static const scudo::uptr RegionSizeLog = 23U;
 #else
-  static const scudo::uptr PrimaryRegionSizeLog = 24U;
+    static const scudo::uptr RegionSizeLog = 24U;
 #endif
-  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
-  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
-  static const bool MaySupportMemoryTagging = true;
-  static const scudo::uptr PrimaryCompactPtrScale = 3U;
-  static const scudo::uptr PrimaryGroupSizeLog = 20U;
-  typedef scudo::u32 PrimaryCompactPtrT;
-  static const bool PrimaryEnableRandomOffset = true;
-  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
+    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    static const scudo::uptr CompactPtrScale = 3U;
+    static const scudo::uptr GroupSizeLog = 20U;
+    typedef scudo::u32 CompactPtrT;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+  };
 };
 
-template <typename BaseConfig, typename SizeClassMapT>
-struct Config : public BaseConfig {
-  using SizeClassMap = SizeClassMapT;
-};
+template <template <typename> class BaseConfig, typename SizeClassMapT>
+struct Config : public BaseConfig<SizeClassMapT> {};
 
-template <typename BaseConfig, typename SizeClassMapT>
+template <template <typename> class BaseConfig, typename SizeClassMapT>
 struct SizeClassAllocator
     : public scudo::SizeClassAllocator64<Config<BaseConfig, SizeClassMapT>> {};
 template <typename SizeClassMapT>
 struct SizeClassAllocator<TestConfig1, SizeClassMapT>
     : public scudo::SizeClassAllocator32<Config<TestConfig1, SizeClassMapT>> {};
 
-template <typename BaseConfig, typename SizeClassMapT>
+template <template <typename> class BaseConfig, typename SizeClassMapT>
 struct TestAllocator : public SizeClassAllocator<BaseConfig, SizeClassMapT> {
   ~TestAllocator() { this->unmapTestOnly(); }
 
@@ -113,7 +127,8 @@ struct TestAllocator : public SizeClassAllocator<BaseConfig, SizeClassMapT> {
   void operator delete(void *ptr) { free(ptr); }
 };
 
-template <class BaseConfig> struct ScudoPrimaryTest : public Test {};
+template <template <typename> class BaseConfig>
+struct ScudoPrimaryTest : public Test {};
 
 #if SCUDO_FUCHSIA
 #define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                              \
@@ -132,12 +147,13 @@ template <class BaseConfig> struct ScudoPrimaryTest : public Test {};
   TEST_F(FIXTURE##NAME##_##TYPE, NAME) { FIXTURE##NAME<TYPE>::Run(); }
 
 #define SCUDO_TYPED_TEST(FIXTURE, NAME)                                        \
-  template <class TypeParam>                                                   \
+  template <template <typename> class TypeParam>                               \
   struct FIXTURE##NAME : public FIXTURE<TypeParam> {                           \
     void Run();                                                                \
   };                                                                           \
   SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                                    \
-  template <class TypeParam> void FIXTURE##NAME<TypeParam>::Run()
+  template <template <typename> class TypeParam>                               \
+  void FIXTURE##NAME<TypeParam>::Run()
 
 SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) {
   using Primary = TestAllocator<TypeParam, scudo::DefaultSizeClassMap>;
@@ -168,16 +184,19 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) {
 }
 
 struct SmallRegionsConfig {
-  using SizeClassMap = scudo::DefaultSizeClassMap;
-  static const scudo::uptr PrimaryRegionSizeLog = 21U;
-  static const scudo::s32 PrimaryMinReleaseToOsIntervalMs = INT32_MIN;
-  static const scudo::s32 PrimaryMaxReleaseToOsIntervalMs = INT32_MAX;
   static const bool MaySupportMemoryTagging = false;
-  typedef scudo::uptr PrimaryCompactPtrT;
-  static const scudo::uptr PrimaryCompactPtrScale = 0;
-  static const bool PrimaryEnableRandomOffset = true;
-  static const scudo::uptr PrimaryMapSizeIncrement = 1UL << 18;
-  static const scudo::uptr PrimaryGroupSizeLog = 20U;
+
+  struct Primary {
+    using SizeClassMap = scudo::DefaultSizeClassMap;
+    static const scudo::uptr RegionSizeLog = 21U;
+    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    typedef scudo::uptr CompactPtrT;
+    static const scudo::uptr CompactPtrScale = 0;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+    static const scudo::uptr GroupSizeLog = 20U;
+  };
 };
 
 // The 64-bit SizeClassAllocator can be easily OOM'd with small region sizes.

diff  --git a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
index b0319011771aa..b8dbf4a5226c0 100644
--- a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
@@ -83,19 +83,27 @@ template <typename Config> static void testSecondaryBasic(void) {
 }
 
 struct NoCacheConfig {
-  typedef scudo::MapAllocatorNoCache SecondaryCache;
   static const bool MaySupportMemoryTagging = false;
+  struct Secondary {
+    template <typename Config>
+    using CacheT = scudo::MapAllocatorNoCache<Config>;
+  };
 };
 
 struct TestConfig {
-  typedef scudo::MapAllocatorCache<TestConfig> SecondaryCache;
   static const bool MaySupportMemoryTagging = false;
-  static const scudo::u32 SecondaryCacheEntriesArraySize = 128U;
-  static const scudo::u32 SecondaryCacheQuarantineSize = 0U;
-  static const scudo::u32 SecondaryCacheDefaultMaxEntriesCount = 64U;
-  static const scudo::uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 20;
-  static const scudo::s32 SecondaryCacheMinReleaseToOsIntervalMs = INT32_MIN;
-  static const scudo::s32 SecondaryCacheMaxReleaseToOsIntervalMs = INT32_MAX;
+  struct Secondary {
+    struct Cache {
+      static const scudo::u32 EntriesArraySize = 128U;
+      static const scudo::u32 QuarantineSize = 0U;
+      static const scudo::u32 DefaultMaxEntriesCount = 64U;
+      static const scudo::uptr DefaultMaxEntrySize = 1UL << 20;
+      static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+      static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    };
+
+    template <typename Config> using CacheT = scudo::MapAllocatorCache<Config>;
+  };
 };
 
 TEST(ScudoSecondaryTest, SecondaryBasic) {


        


More information about the llvm-commits mailing list