[compiler-rt] [scudo] Refactor allocator config to support optional flags (PR #81805)

via llvm-commits llvm-commits at lists.llvm.org
Wed Feb 14 15:19:07 PST 2024


https://github.com/ChiaHungDuan created https://github.com/llvm/llvm-project/pull/81805

Instead of explicitly disabling a feature by declaring the variable and setting it to false, this change supports optional flags. I.e., you can skip certain flags if you are not using them.

This optional feature supports both forms,
  1. Value: A parameter for a feature. E.g., EnableRandomOffset
  2. Type: A C++ type implementing a feature. E.g., ConditionVariableT

On the other hand, the flags are accessed through one of the wrappers, BaseConfig/PrimaryConfig/SecondaryConfig/CacheConfig (CacheConfig is embedded in SecondaryConfig). These wrappers have the getters to access the value and the type. When adding a new feature, we need to add it to `allocator_config.def` and mark the new variable with either *_REQUIRED_* or *_OPTIONAL_* macro so that the accessor will be generated properly.

In addition, this also removes the need for `UseConditionVariable` to toggle the condition variable on/off. Now we only need to define the type of the condition variable.

>From a083cd5c06c862936cf172f487dc57a61f54bfdc Mon Sep 17 00:00:00 2001
From: Chia-hung Duan <chiahungduan at google.com>
Date: Sat, 10 Feb 2024 00:16:45 +0000
Subject: [PATCH] [scudo] Refactor allocator config to support optional flags

Instead of explicitly disabling a feature by declaring the variable and
setting it to false, this change supports optional flags. I.e., you can
skip certain flags if you are not using them.

This optional feature supports both forms,
  1. Value: A parameter for a feature. E.g., EnableRandomOffset
  2. Type: A C++ type implementing a feature. E.g., ConditionVariableT

On the other hand, the flags are accessed through one of the
wrappers, BaseConfig/PrimaryConfig/SecondaryConfig/CacheConfig
(CacheConfig is embedded in SecondaryConfig). These wrappers have the
getters to access the value and the type. When adding a new feature, we
need to add it to `allocator_config.def` and mark the new variable with
either *_REQUIRED_* or *_OPTIONAL_* macro so that the accessor will be
generated properly.

In addition, this also removes the need for `UseConditionVariable` to
toggle the condition variable on/off. Now we only need to define the
type of the condition variable.
---
 .../lib/scudo/standalone/CMakeLists.txt       |   1 +
 .../lib/scudo/standalone/allocator_config.def | 123 ++++++++++++++++
 .../lib/scudo/standalone/allocator_config.h   |  78 +---------
 .../standalone/allocator_config_wrapper.h     | 134 ++++++++++++++++++
 compiler-rt/lib/scudo/standalone/combined.h   |  54 +++----
 .../lib/scudo/standalone/condition_variable.h |  16 ---
 compiler-rt/lib/scudo/standalone/memtag.h     |   2 +-
 compiler-rt/lib/scudo/standalone/primary32.h  |  25 ++--
 compiler-rt/lib/scudo/standalone/primary64.h  |  25 ++--
 compiler-rt/lib/scudo/standalone/secondary.h  |  36 +++--
 .../scudo/standalone/tests/combined_test.cpp  |   1 -
 .../scudo/standalone/tests/primary_test.cpp   |  29 +++-
 .../scudo/standalone/tests/secondary_test.cpp |  22 ++-
 13 files changed, 378 insertions(+), 168 deletions(-)
 create mode 100644 compiler-rt/lib/scudo/standalone/allocator_config.def
 create mode 100644 compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h

diff --git a/compiler-rt/lib/scudo/standalone/CMakeLists.txt b/compiler-rt/lib/scudo/standalone/CMakeLists.txt
index 60092005cc33bb..6fb4e88de3155f 100644
--- a/compiler-rt/lib/scudo/standalone/CMakeLists.txt
+++ b/compiler-rt/lib/scudo/standalone/CMakeLists.txt
@@ -58,6 +58,7 @@ endif()
 set(SCUDO_HEADERS
   allocator_common.h
   allocator_config.h
+  allocator_config_wrapper.h
   atomic_helpers.h
   bytemap.h
   checksum.h
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.def b/compiler-rt/lib/scudo/standalone/allocator_config.def
new file mode 100644
index 00000000000000..ddde71d2463f78
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.def
@@ -0,0 +1,123 @@
+//===-- allocator_config.def ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file defines all the flags and types supported in Scudo.
+
+#ifndef BASE_REQUIRED_TEMPLATE_TYPE
+#define BASE_REQUIRED_TEMPLATE_TYPE(...)
+#endif
+#ifndef BASE_OPTIONAL
+#define BASE_OPTIONAL(...)
+#endif
+#ifndef PRIMARY_REQUIRED_TYPE
+#define PRIMARY_REQUIRED_TYPE(...)
+#endif
+#ifndef PRIMARY_REQUIRED
+#define PRIMARY_REQUIRED(...)
+#endif
+#ifndef PRIMARY_OPTIONAL
+#define PRIMARY_OPTIONAL(...)
+#endif
+#ifndef PRIMARY_OPTIONAL_TYPE
+#define PRIMARY_OPTIONAL_TYPE(...)
+#endif
+#ifndef SECONDARY_REQUIRED_TEMPLATE_TYPE
+#define SECONDARY_REQUIRED_TEMPLATE_TYPE(...)
+#endif
+#ifndef SECONDARY_CACHE_OPTIONAL
+#define SECONDARY_CACHE_OPTIONAL(...)
+#endif
+
+// BASE_REQUIRED_TEMPLATE_TYPE(NAME)
+//
+// Thread-Specific Data Registry used, shared or exclusive.
+BASE_REQUIRED_TEMPLATE_TYPE(TSDRegistryT)
+
+// Defines the type of Primary allocator to use.
+BASE_REQUIRED_TEMPLATE_TYPE(PrimaryT)
+
+// Defines the type of Secondary allocator to use.
+BASE_REQUIRED_TEMPLATE_TYPE(SecondaryT)
+
+// BASE_OPTIONAL(TYPE, NAME, DEFAULT)
+//
+// Indicates possible support for Memory Tagging.
+BASE_OPTIONAL(const bool, MaySupportMemoryTagging, false)
+
+// PRIMARY_REQUIRED_TYPE(NAME)
+//
+// SizeClassMap to use with the Primary.
+PRIMARY_REQUIRED_TYPE(SizeClassMap)
+
+// Defines the type and scale of a compact pointer. A compact pointer can
+// be understood as the offset of a pointer within the region it belongs
+// to, in increments of a power-of-2 scale. See `CompactPtrScale` also.
+PRIMARY_REQUIRED_TYPE(CompactPtrT)
+
+// PRIMARY_REQUIRED(TYPE, NAME)
+//
+// The scale of a compact pointer. E.g., Ptr = Base + (CompactPtr << Scale).
+PRIMARY_REQUIRED(const uptr, CompactPtrScale)
+
+// Log2 of the size of a size class region, as used by the Primary.
+PRIMARY_REQUIRED(const uptr, RegionSizeLog)
+
+// Log2 of the size of block group, as used by the Primary. Each group
+// contains a range of memory addresses, blocks in the range will belong
+// to the same group. In general, single region may have 1 or 2MB group
+// size. Multiple regions will have the group size equal to the region
+// size because the region size is usually smaller than 1 MB.
+// A smaller value gives finer-grained control of memory usage but the
+// trade-off is that deallocation may take a longer time.
+PRIMARY_REQUIRED(const uptr, GroupSizeLog)
+
+// Call map for user memory with at least this size. Only used with primary64.
+PRIMARY_REQUIRED(const uptr, MapSizeIncrement)
+
+// Defines the minimal & maximal release interval that can be set.
+PRIMARY_REQUIRED(const s32, MinReleaseToOsIntervalMs)
+PRIMARY_REQUIRED(const s32, MaxReleaseToOsIntervalMs)
+
+// PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT)
+//
+// Indicates support for offsetting the start of a region by a random number of
+// pages. Only used with primary64.
+PRIMARY_OPTIONAL(const bool, EnableRandomOffset, false)
+
+// PRIMARY_OPTIONAL_TYPE(NAME, DEFAULT)
+//
+// Use condition variable to shorten the waiting time of refillment of
+// freelist. Note that this depends on the implementation of condition
+// variable on each platform and the performance may vary so that it does not
+// guarantee a performance benefit.
+PRIMARY_OPTIONAL_TYPE(ConditionVariableT, ConditionVariableDummy)
+
+// SECONDARY_REQUIRED_TEMPLATE_TYPE(NAME)
+//
+// Defines the type of Secondary Cache to use.
+SECONDARY_REQUIRED_TEMPLATE_TYPE(CacheT)
+
+// SECONDARY_CACHE_OPTIONAL(TYPE, NAME, DEFAULT)
+//
+// Defines the type of cache used by the Secondary. Some additional
+// configuration entries can be necessary depending on the Cache.
+SECONDARY_CACHE_OPTIONAL(const u32, EntriesArraySize, 0)
+SECONDARY_CACHE_OPTIONAL(const u32, QuarantineSize, 0)
+SECONDARY_CACHE_OPTIONAL(const u32, DefaultMaxEntriesCount, 0)
+SECONDARY_CACHE_OPTIONAL(const u32, DefaultMaxEntrySize, 0)
+SECONDARY_CACHE_OPTIONAL(const s32, MinReleaseToOsIntervalMs, INT32_MIN)
+SECONDARY_CACHE_OPTIONAL(const s32, MaxReleaseToOsIntervalMs, INT32_MAX)
+
+#undef SECONDARY_CACHE_OPTIONAL
+#undef SECONDARY_REQUIRED_TEMPLATE_TYPE
+#undef PRIMARY_OPTIONAL_TYPE
+#undef PRIMARY_OPTIONAL
+#undef PRIMARY_REQUIRED
+#undef PRIMARY_REQUIRED_TYPE
+#undef BASE_OPTIONAL
+#undef BASE_REQUIRED_TEMPLATE_TYPE
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.h b/compiler-rt/lib/scudo/standalone/allocator_config.h
index 3c6aa3acb0e45b..1e0cf1015ba67e 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -38,80 +38,10 @@
 
 namespace scudo {
 
-// The combined allocator uses a structure as a template argument that
-// specifies the configuration options for the various subcomponents of the
-// allocator.
-//
-// struct ExampleConfig {
-//   // Indicates possible support for Memory Tagging.
-//   static const bool MaySupportMemoryTagging = false;
-//
-//   // Thread-Specific Data Registry used, shared or exclusive.
-//   template <class A> using TSDRegistryT = TSDRegistrySharedT<A, 8U, 4U>;
-//
-//   struct Primary {
-//     // SizeClassMap to use with the Primary.
-//     using SizeClassMap = DefaultSizeClassMap;
-//
-//     // Log2 of the size of a size class region, as used by the Primary.
-//     static const uptr RegionSizeLog = 30U;
-//
-//     // Log2 of the size of block group, as used by the Primary. Each group
-//     // contains a range of memory addresses, blocks in the range will belong
-//     // to the same group. In general, single region may have 1 or 2MB group
-//     // size. Multiple regions will have the group size equal to the region
-//     // size because the region size is usually smaller than 1 MB.
-//     // Smaller value gives fine-grained control of memory usage but the
-//     // trade-off is that it may take longer time of deallocation.
-//     static const uptr GroupSizeLog = 20U;
-//
-//     // Defines the type and scale of a compact pointer. A compact pointer can
-//     // be understood as the offset of a pointer within the region it belongs
-//     // to, in increments of a power-of-2 scale.
-//     // eg: Ptr = Base + (CompactPtr << Scale).
-//     typedef u32 CompactPtrT;
-//     static const uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
-//
-//     // Indicates support for offsetting the start of a region by
-//     // a random number of pages. Only used with primary64.
-//     static const bool EnableRandomOffset = true;
-//
-//     // Call map for user memory with at least this size. Only used with
-//     // primary64.
-//     static const uptr MapSizeIncrement = 1UL << 18;
-//
-//     // Defines the minimal & maximal release interval that can be set.
-//     static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
-//     static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-//
-//     // Use condition variable to shorten the waiting time of refillment of
-//     // freelist. Note that this depends on the implementation of condition
-//     // variable on each platform and the performance may vary so that it
-//     // doesn't guarantee a performance benefit.
-//     // Note that both variables have to be defined to enable it.
-//     static const bool UseConditionVariable = true;
-//     using ConditionVariableT = ConditionVariableLinux;
-//   };
-//   // Defines the type of Primary allocator to use.
-//   template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
-//
-//   // Defines the type of cache used by the Secondary. Some additional
-//   // configuration entries can be necessary depending on the Cache.
-//   struct Secondary {
-//     struct Cache {
-//       static const u32 EntriesArraySize = 32U;
-//       static const u32 QuarantineSize = 0U;
-//       static const u32 DefaultMaxEntriesCount = 32U;
-//       static const uptr DefaultMaxEntrySize = 1UL << 19;
-//       static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
-//       static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
-//     };
-//     // Defines the type of Secondary Cache to use.
-//     template <typename Config> using CacheT = MapAllocatorCache<Config>;
-//   };
-//   // Defines the type of Secondary allocator to use.
-//   template <typename Config> using SecondaryT = MapAllocator<Config>;
-// };
+// Scudo uses a structure as a template argument that specifies the
+// configuration options for the various subcomponents of the allocator. See the
+// following configs as examples and check `allocator_config.def` for all the
+// available options.
 
 #ifndef SCUDO_USE_CUSTOM_CONFIG
 
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h b/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
new file mode 100644
index 00000000000000..c946531dc4e02f
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/allocator_config_wrapper.h
@@ -0,0 +1,134 @@
+//===-- allocator_config_wrapper.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_CONFIG_WRAPPER_H_
+#define SCUDO_ALLOCATOR_CONFIG_WRAPPER_H_
+
+#include "condition_variable.h"
+#include "internal_defs.h"
+#include "secondary.h"
+
+namespace {
+
+template <typename T> struct removeConst {
+  using type = T;
+};
+template <typename T> struct removeConst<const T> {
+  using type = T;
+};
+
+} // namespace
+
+namespace scudo {
+
+#define OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT)                                 \
+  template <typename Config, typename = TYPE> struct NAME##State {             \
+    static constexpr removeConst<TYPE>::type getValue() { return DEFAULT; }    \
+  };                                                                           \
+  template <typename Config>                                                   \
+  struct NAME##State<Config, decltype(Config::NAME)> {                         \
+    static constexpr removeConst<TYPE>::type getValue() {                      \
+      return Config::NAME;                                                     \
+    }                                                                          \
+  };
+
+#define OPTIONAL_TYPE_TEMPLATE(NAME, DEFAULT)                                  \
+  template <typename Config, typename Void = Config> struct NAME##Type {       \
+    static constexpr bool enabled() { return false; }                          \
+    using NAME = DEFAULT;                                                      \
+  };                                                                           \
+  template <typename Config>                                                   \
+  struct NAME##Type<Config, typename Config::NAME> {                           \
+    static constexpr bool enabled() { return true; }                           \
+    using NAME = typename Config::NAME;                                        \
+  };
+
+template <typename AllocatorConfig> struct BaseConfig {
+#define BASE_REQUIRED_TEMPLATE_TYPE(NAME)                                      \
+  template <typename T> using NAME = typename AllocatorConfig::template NAME<T>;
+
+#define BASE_OPTIONAL(TYPE, NAME, DEFAULT)                                     \
+  OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT)                                       \
+  static constexpr removeConst<TYPE>::type get##NAME() {                       \
+    return NAME##State<AllocatorConfig>::getValue();                           \
+  }
+
+#include "allocator_config.def"
+}; // BaseConfig
+
+template <typename AllocatorConfig> struct PrimaryConfig {
+  // TODO: Pass this flag through template argument to remove this hard-coded
+  //       function.
+  static constexpr bool getMaySupportMemoryTagging() {
+    return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
+  }
+
+#define PRIMARY_REQUIRED_TYPE(NAME)                                            \
+  using NAME = typename AllocatorConfig::Primary::NAME;
+
+#define PRIMARY_REQUIRED(TYPE, NAME)                                           \
+  static constexpr removeConst<TYPE>::type get##NAME() {                       \
+    return AllocatorConfig::Primary::NAME;                                     \
+  }
+
+#define PRIMARY_OPTIONAL(TYPE, NAME, DEFAULT)                                  \
+  OPTIONAL_TEMPLATE(TYPE, NAME, DEFAULT)                                       \
+  static constexpr removeConst<TYPE>::type get##NAME() {                       \
+    return NAME##State<typename AllocatorConfig::Primary>::getValue();         \
+  }
+
+#define PRIMARY_OPTIONAL_TYPE(NAME, DEFAULT)                                   \
+  OPTIONAL_TYPE_TEMPLATE(NAME, DEFAULT)                                        \
+  static constexpr bool has##NAME() {                                          \
+    return NAME##Type<typename AllocatorConfig::Primary>::enabled();           \
+  }                                                                            \
+  using NAME = typename NAME##Type<typename AllocatorConfig::Primary>::NAME;
+
+#include "allocator_config.def"
+
+}; // PrimaryConfig
+
+template <typename AllocatorConfig> struct SecondaryConfig {
+  // TODO: Pass this flag through template argument to remove this hard-coded
+  //       function.
+  static constexpr bool getMaySupportMemoryTagging() {
+    return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
+  }
+
+#define SECONDARY_REQUIRED_TEMPLATE_TYPE(NAME)                                 \
+  template <typename T>                                                        \
+  using NAME = typename AllocatorConfig::Secondary::template NAME<T>;
+#include "allocator_config.def"
+
+  struct CacheConfig {
+    // TODO: Pass this flag through template argument to remove this hard-coded
+    //       function.
+    static constexpr bool getMaySupportMemoryTagging() {
+      return BaseConfig<AllocatorConfig>::getMaySupportMemoryTagging();
+    }
+
+#define SECONDARY_CACHE_OPTIONAL(TYPE, NAME, DEFAULT)                          \
+  template <typename Config, typename = TYPE> struct NAME##State {             \
+    static constexpr removeConst<TYPE>::type getValue() { return DEFAULT; }    \
+  };                                                                           \
+  template <typename Config>                                                   \
+  struct NAME##State<Config, decltype(Config::Cache)> {                        \
+    static constexpr removeConst<TYPE>::type getValue() {                      \
+      return Config::Cache::NAME;                                              \
+    }                                                                          \
+  };                                                                           \
+  static constexpr removeConst<TYPE>::type get##NAME() {                       \
+    return NAME##State<typename AllocatorConfig::Secondary>::getValue();       \
+  }
+#include "allocator_config.def"
+  };
+}; // SecondaryConfig
+
+} // namespace scudo
+
+#endif // SCUDO_ALLOCATOR_CONFIG_WRAPPER_H_
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 80774073522a72..3d4023a484073c 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -9,6 +9,7 @@
 #ifndef SCUDO_COMBINED_H_
 #define SCUDO_COMBINED_H_
 
+#include "allocator_config_wrapper.h"
 #include "chunk.h"
 #include "common.h"
 #include "flags.h"
@@ -46,11 +47,14 @@ namespace scudo {
 template <class Config, void (*PostInitCallback)(void) = EmptyCallback>
 class Allocator {
 public:
-  using PrimaryT = typename Config::template PrimaryT<Config>;
-  using SecondaryT = typename Config::template SecondaryT<Config>;
+  using AllocatorConfig = BaseConfig<Config>;
+  using PrimaryT =
+      typename AllocatorConfig::template PrimaryT<PrimaryConfig<Config>>;
+  using SecondaryT =
+      typename AllocatorConfig::template SecondaryT<SecondaryConfig<Config>>;
   using CacheT = typename PrimaryT::CacheT;
   typedef Allocator<Config, PostInitCallback> ThisT;
-  typedef typename Config::template TSDRegistryT<ThisT> TSDRegistryT;
+  typedef typename AllocatorConfig::template TSDRegistryT<ThisT> TSDRegistryT;
 
   void callPostInitCallback() {
     pthread_once(&PostInitNonce, PostInitCallback);
@@ -71,7 +75,7 @@ class Allocator {
       Header.State = Chunk::State::Available;
       Chunk::storeHeader(Allocator.Cookie, Ptr, &Header);
 
-      if (allocatorSupportsMemoryTagging<Config>())
+      if (allocatorSupportsMemoryTagging<AllocatorConfig>())
         Ptr = untagPointer(Ptr);
       void *BlockBegin = Allocator::getBlockBegin(Ptr, &Header);
       Cache.deallocate(Header.ClassId, BlockBegin);
@@ -98,7 +102,8 @@ class Allocator {
 
       // Reset tag to 0 as this chunk may have been previously used for a tagged
       // user allocation.
-      if (UNLIKELY(useMemoryTagging<Config>(Allocator.Primary.Options.load())))
+      if (UNLIKELY(useMemoryTagging<AllocatorConfig>(
+              Allocator.Primary.Options.load())))
         storeTags(reinterpret_cast<uptr>(Ptr),
                   reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
 
@@ -158,7 +163,7 @@ class Allocator {
       Primary.Options.set(OptionBit::DeallocTypeMismatch);
     if (getFlags()->delete_size_mismatch)
       Primary.Options.set(OptionBit::DeleteSizeMismatch);
-    if (allocatorSupportsMemoryTagging<Config>() &&
+    if (allocatorSupportsMemoryTagging<AllocatorConfig>() &&
         systemSupportsMemoryTagging())
       Primary.Options.set(OptionBit::UseMemoryTagging);
 
@@ -261,7 +266,7 @@ class Allocator {
   void drainCaches() { TSDRegistry.drainCaches(this); }
 
   ALWAYS_INLINE void *getHeaderTaggedPointer(void *Ptr) {
-    if (!allocatorSupportsMemoryTagging<Config>())
+    if (!allocatorSupportsMemoryTagging<AllocatorConfig>())
       return Ptr;
     auto UntaggedPtr = untagPointer(Ptr);
     if (UntaggedPtr != Ptr)
@@ -273,7 +278,7 @@ class Allocator {
   }
 
   ALWAYS_INLINE uptr addHeaderTag(uptr Ptr) {
-    if (!allocatorSupportsMemoryTagging<Config>())
+    if (!allocatorSupportsMemoryTagging<AllocatorConfig>())
       return Ptr;
     return addFixedTag(Ptr, 2);
   }
@@ -410,7 +415,7 @@ class Allocator {
       //
       // When memory tagging is enabled, zeroing the contents is done as part of
       // setting the tag.
-      if (UNLIKELY(useMemoryTagging<Config>(Options))) {
+      if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options))) {
         uptr PrevUserPtr;
         Chunk::UnpackedHeader Header;
         const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
@@ -492,7 +497,7 @@ class Allocator {
     } else {
       Block = addHeaderTag(Block);
       Ptr = addHeaderTag(Ptr);
-      if (UNLIKELY(useMemoryTagging<Config>(Options))) {
+      if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options))) {
         storeTags(reinterpret_cast<uptr>(Block), reinterpret_cast<uptr>(Ptr));
         storeSecondaryAllocationStackMaybe(Options, Ptr, Size);
       }
@@ -652,7 +657,7 @@ class Allocator {
                            (reinterpret_cast<uptr>(OldTaggedPtr) + NewSize)) &
             Chunk::SizeOrUnusedBytesMask;
         Chunk::storeHeader(Cookie, OldPtr, &Header);
-        if (UNLIKELY(useMemoryTagging<Config>(Options))) {
+        if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options))) {
           if (ClassId) {
             resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
                               reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
@@ -755,8 +760,9 @@ class Allocator {
       Base = untagPointer(Base);
     const uptr From = Base;
     const uptr To = Base + Size;
-    bool MayHaveTaggedPrimary = allocatorSupportsMemoryTagging<Config>() &&
-                                systemSupportsMemoryTagging();
+    bool MayHaveTaggedPrimary =
+        allocatorSupportsMemoryTagging<AllocatorConfig>() &&
+        systemSupportsMemoryTagging();
     auto Lambda = [this, From, To, MayHaveTaggedPrimary, Callback,
                    Arg](uptr Block) {
       if (Block < From || Block >= To)
@@ -777,9 +783,9 @@ class Allocator {
       }
       if (Header.State == Chunk::State::Allocated) {
         uptr TaggedChunk = Chunk;
-        if (allocatorSupportsMemoryTagging<Config>())
+        if (allocatorSupportsMemoryTagging<AllocatorConfig>())
           TaggedChunk = untagPointer(TaggedChunk);
-        if (useMemoryTagging<Config>(Primary.Options.load()))
+        if (useMemoryTagging<AllocatorConfig>(Primary.Options.load()))
           TaggedChunk = loadTag(Chunk);
         Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
                  Arg);
@@ -878,7 +884,7 @@ class Allocator {
   }
 
   bool useMemoryTaggingTestOnly() const {
-    return useMemoryTagging<Config>(Primary.Options.load());
+    return useMemoryTagging<AllocatorConfig>(Primary.Options.load());
   }
   void disableMemoryTagging() {
     // If we haven't been initialized yet, we need to initialize now in order to
@@ -888,7 +894,7 @@ class Allocator {
     // callback), which may cause mappings to be created with memory tagging
     // enabled.
     TSDRegistry.initOnceMaybe(this);
-    if (allocatorSupportsMemoryTagging<Config>()) {
+    if (allocatorSupportsMemoryTagging<AllocatorConfig>()) {
       Secondary.disableMemoryTagging();
       Primary.Options.clear(OptionBit::UseMemoryTagging);
     }
@@ -959,7 +965,7 @@ class Allocator {
                            const char *MemoryTags, uintptr_t MemoryAddr,
                            size_t MemorySize) {
     *ErrorInfo = {};
-    if (!allocatorSupportsMemoryTagging<Config>() ||
+    if (!allocatorSupportsMemoryTagging<AllocatorConfig>() ||
         MemoryAddr + MemorySize < MemoryAddr)
       return;
 
@@ -998,7 +1004,7 @@ class Allocator {
 
   static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
                 "Minimal alignment must at least cover a chunk header.");
-  static_assert(!allocatorSupportsMemoryTagging<Config>() ||
+  static_assert(!allocatorSupportsMemoryTagging<AllocatorConfig>() ||
                     MinAlignment >= archMemoryTagGranuleSize(),
                 "");
 
@@ -1098,7 +1104,7 @@ class Allocator {
     const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
     if (LIKELY(Header->ClassId))
       return SizeOrUnusedBytes;
-    if (allocatorSupportsMemoryTagging<Config>())
+    if (allocatorSupportsMemoryTagging<AllocatorConfig>())
       Ptr = untagPointer(const_cast<void *>(Ptr));
     return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
            reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
@@ -1118,12 +1124,12 @@ class Allocator {
       Header->State = Chunk::State::Available;
     else
       Header->State = Chunk::State::Quarantined;
-    Header->OriginOrWasZeroed = useMemoryTagging<Config>(Options) &&
+    Header->OriginOrWasZeroed = useMemoryTagging<AllocatorConfig>(Options) &&
                                 Header->ClassId &&
                                 !TSDRegistry.getDisableMemInit();
     Chunk::storeHeader(Cookie, Ptr, Header);
 
-    if (UNLIKELY(useMemoryTagging<Config>(Options))) {
+    if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options))) {
       u8 PrevTag = extractTag(reinterpret_cast<uptr>(TaggedPtr));
       storeDeallocationStackMaybe(Options, Ptr, PrevTag, Size);
       if (Header->ClassId) {
@@ -1140,7 +1146,7 @@ class Allocator {
       }
     }
     if (BypassQuarantine) {
-      if (allocatorSupportsMemoryTagging<Config>())
+      if (allocatorSupportsMemoryTagging<AllocatorConfig>())
         Ptr = untagPointer(Ptr);
       void *BlockBegin = getBlockBegin(Ptr, Header);
       const uptr ClassId = Header->ClassId;
@@ -1159,7 +1165,7 @@ class Allocator {
         if (CacheDrained)
           Primary.tryReleaseToOS(ClassId, ReleaseToOS::Normal);
       } else {
-        if (UNLIKELY(useMemoryTagging<Config>(Options)))
+        if (UNLIKELY(useMemoryTagging<AllocatorConfig>(Options)))
           storeTags(reinterpret_cast<uptr>(BlockBegin),
                     reinterpret_cast<uptr>(Ptr));
         Secondary.deallocate(Options, BlockBegin);
diff --git a/compiler-rt/lib/scudo/standalone/condition_variable.h b/compiler-rt/lib/scudo/standalone/condition_variable.h
index 4afebdc9d04c2c..3f16c86651e736 100644
--- a/compiler-rt/lib/scudo/standalone/condition_variable.h
+++ b/compiler-rt/lib/scudo/standalone/condition_variable.h
@@ -39,22 +39,6 @@ class ConditionVariableDummy
   }
 };
 
-template <typename Config, typename = const bool>
-struct ConditionVariableState {
-  static constexpr bool enabled() { return false; }
-  // This is only used for compilation purpose so that we won't end up having
-  // many conditional compilations. If you want to use `ConditionVariableDummy`,
-  // define `ConditionVariableT` in your allocator configuration. See
-  // allocator_config.h for more details.
-  using ConditionVariableT = ConditionVariableDummy;
-};
-
-template <typename Config>
-struct ConditionVariableState<Config, decltype(Config::UseConditionVariable)> {
-  static constexpr bool enabled() { return Config::UseConditionVariable; }
-  using ConditionVariableT = typename Config::ConditionVariableT;
-};
-
 } // namespace scudo
 
 #endif // SCUDO_CONDITION_VARIABLE_H_
diff --git a/compiler-rt/lib/scudo/standalone/memtag.h b/compiler-rt/lib/scudo/standalone/memtag.h
index aaed2192ad7521..1f6983e99404a2 100644
--- a/compiler-rt/lib/scudo/standalone/memtag.h
+++ b/compiler-rt/lib/scudo/standalone/memtag.h
@@ -326,7 +326,7 @@ inline void *addFixedTag(void *Ptr, uptr Tag) {
 
 template <typename Config>
 inline constexpr bool allocatorSupportsMemoryTagging() {
-  return archSupportsMemoryTagging() && Config::MaySupportMemoryTagging &&
+  return archSupportsMemoryTagging() && Config::getMaySupportMemoryTagging() &&
          (1 << SCUDO_MIN_ALIGNMENT_LOG) >= archMemoryTagGranuleSize();
 }
 
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 4d03b282d000de..5a046fbed69d7f 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -43,14 +43,13 @@ namespace scudo {
 
 template <typename Config> class SizeClassAllocator32 {
 public:
-  typedef typename Config::Primary::CompactPtrT CompactPtrT;
-  typedef typename Config::Primary::SizeClassMap SizeClassMap;
-  static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
+  typedef typename Config::CompactPtrT CompactPtrT;
+  typedef typename Config::SizeClassMap SizeClassMap;
+  static const uptr GroupSizeLog = Config::getGroupSizeLog();
   // The bytemap can only track UINT8_MAX - 1 classes.
   static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
   // Regions should be large enough to hold the largest Block.
-  static_assert((1UL << Config::Primary::RegionSizeLog) >=
-                    SizeClassMap::MaxSize,
+  static_assert((1UL << Config::getRegionSizeLog()) >= SizeClassMap::MaxSize,
                 "");
   typedef SizeClassAllocator32<Config> ThisT;
   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
@@ -348,9 +347,9 @@ template <typename Config> class SizeClassAllocator32 {
 
   bool setOption(Option O, sptr Value) {
     if (O == Option::ReleaseInterval) {
-      const s32 Interval = Max(Min(static_cast<s32>(Value),
-                                   Config::Primary::MaxReleaseToOsIntervalMs),
-                               Config::Primary::MinReleaseToOsIntervalMs);
+      const s32 Interval = Max(
+          Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
+          Config::getMinReleaseToOsIntervalMs());
       atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
       return true;
     }
@@ -390,9 +389,9 @@ template <typename Config> class SizeClassAllocator32 {
 
 private:
   static const uptr NumClasses = SizeClassMap::NumClasses;
-  static const uptr RegionSize = 1UL << Config::Primary::RegionSizeLog;
-  static const uptr NumRegions =
-      SCUDO_MMAP_RANGE_SIZE >> Config::Primary::RegionSizeLog;
+  static const uptr RegionSize = 1UL << Config::getRegionSizeLog();
+  static const uptr NumRegions = SCUDO_MMAP_RANGE_SIZE >>
+                                 Config::getRegionSizeLog();
   static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
   typedef FlatByteMap<NumRegions> ByteMap;
 
@@ -425,7 +424,7 @@ template <typename Config> class SizeClassAllocator32 {
   static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
 
   uptr computeRegionId(uptr Mem) {
-    const uptr Id = Mem >> Config::Primary::RegionSizeLog;
+    const uptr Id = Mem >> Config::getRegionSizeLog();
     CHECK_LT(Id, NumRegions);
     return Id;
   }
@@ -454,7 +453,7 @@ template <typename Config> class SizeClassAllocator32 {
       unmap(reinterpret_cast<void *>(End), MapEnd - End);
 
     DCHECK_EQ(Region % RegionSize, 0U);
-    static_assert(Config::Primary::RegionSizeLog == GroupSizeLog,
+    static_assert(Config::getRegionSizeLog() == GroupSizeLog,
                   "Memory group should be the same size as Region");
 
     return Region;
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 9a642d23620e1e..7d2f4836a82779 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -48,13 +48,12 @@ namespace scudo {
 
 template <typename Config> class SizeClassAllocator64 {
 public:
-  typedef typename Config::Primary::CompactPtrT CompactPtrT;
-  typedef typename Config::Primary::SizeClassMap SizeClassMap;
-  typedef typename ConditionVariableState<
-      typename Config::Primary>::ConditionVariableT ConditionVariableT;
-  static const uptr CompactPtrScale = Config::Primary::CompactPtrScale;
-  static const uptr RegionSizeLog = Config::Primary::RegionSizeLog;
-  static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
+  typedef typename Config::CompactPtrT CompactPtrT;
+  typedef typename Config::SizeClassMap SizeClassMap;
+  typedef typename Config::ConditionVariableT ConditionVariableT;
+  static const uptr CompactPtrScale = Config::getCompactPtrScale();
+  static const uptr RegionSizeLog = Config::getRegionSizeLog();
+  static const uptr GroupSizeLog = Config::getGroupSizeLog();
   static_assert(RegionSizeLog >= GroupSizeLog,
                 "Group size shouldn't be greater than the region size");
   static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
@@ -75,7 +78,7 @@ template <typename Config> class SizeClassAllocator64 {
   static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
 
   static bool conditionVariableEnabled() {
-    return ConditionVariableState<typename Config::Primary>::enabled();
+    return Config::hasConditionVariableT();
   }
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
@@ -136,7 +139,7 @@ template <typename Config> class SizeClassAllocator64 {
       // The actual start of a region is offset by a random number of pages
       // when PrimaryEnableRandomOffset is set.
       Region->RegionBeg = (PrimaryBase + (I << RegionSizeLog)) +
-                          (Config::Primary::EnableRandomOffset
+                          (Config::getEnableRandomOffset()
                                ? ((getRandomModN(&Seed, 16) + 1) * PageSize)
                                : 0);
       Region->RandState = getRandomU32(&Seed);
@@ -416,9 +419,9 @@ template <typename Config> class SizeClassAllocator64 {
 
   bool setOption(Option O, sptr Value) {
     if (O == Option::ReleaseInterval) {
-      const s32 Interval = Max(Min(static_cast<s32>(Value),
-                                   Config::Primary::MaxReleaseToOsIntervalMs),
-                               Config::Primary::MinReleaseToOsIntervalMs);
+      const s32 Interval = Max(
+          Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
+          Config::getMinReleaseToOsIntervalMs());
       atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
       return true;
     }
@@ -532,7 +535,7 @@ template <typename Config> class SizeClassAllocator64 {
   static const uptr NumClasses = SizeClassMap::NumClasses;
   static const uptr PrimarySize = RegionSize * NumClasses;
 
-  static const uptr MapSizeIncrement = Config::Primary::MapSizeIncrement;
+  static const uptr MapSizeIncrement = Config::getMapSizeIncrement();
   // Fill at most this number of batches from the newly map'd memory.
   static const u32 MaxNumBatches = SCUDO_ANDROID ? 4U : 8U;
 
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index f52a4188bcf3a6..7a185d2bd1ab5e 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -151,8 +151,6 @@ template <typename T> class NonZeroLengthArray<T, 0> {
 
 template <typename Config> class MapAllocatorCache {
 public:
-  using CacheConfig = typename Config::Secondary::Cache;
-
   void getStats(ScopedString *Str) {
     ScopedLock L(Mutex);
     uptr Integral;
@@ -177,16 +175,16 @@ template <typename Config> class MapAllocatorCache {
   }
 
   // Ensure the default maximum specified fits the array.
-  static_assert(CacheConfig::DefaultMaxEntriesCount <=
-                    CacheConfig::EntriesArraySize,
+  static_assert(Config::getDefaultMaxEntriesCount() <=
+                    Config::getEntriesArraySize(),
                 "");
 
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
     DCHECK_EQ(EntriesCount, 0U);
     setOption(Option::MaxCacheEntriesCount,
-              static_cast<sptr>(CacheConfig::DefaultMaxEntriesCount));
+              static_cast<sptr>(Config::getDefaultMaxEntriesCount()));
     setOption(Option::MaxCacheEntrySize,
-              static_cast<sptr>(CacheConfig::DefaultMaxEntrySize));
+              static_cast<sptr>(Config::getDefaultMaxEntrySize()));
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
   }
 
@@ -231,9 +229,9 @@ template <typename Config> class MapAllocatorCache {
         // just unmap it.
         break;
       }
-      if (CacheConfig::QuarantineSize && useMemoryTagging<Config>(Options)) {
+      if (Config::getQuarantineSize() && useMemoryTagging<Config>(Options)) {
         QuarantinePos =
-            (QuarantinePos + 1) % Max(CacheConfig::QuarantineSize, 1u);
+            (QuarantinePos + 1) % Max(Config::getQuarantineSize(), 1u);
         if (!Quarantine[QuarantinePos].isValid()) {
           Quarantine[QuarantinePos] = Entry;
           return;
@@ -360,14 +358,14 @@ template <typename Config> class MapAllocatorCache {
   bool setOption(Option O, sptr Value) {
     if (O == Option::ReleaseInterval) {
       const s32 Interval = Max(
-          Min(static_cast<s32>(Value), CacheConfig::MaxReleaseToOsIntervalMs),
-          CacheConfig::MinReleaseToOsIntervalMs);
+          Min(static_cast<s32>(Value), Config::getMaxReleaseToOsIntervalMs()),
+          Config::getMinReleaseToOsIntervalMs());
       atomic_store_relaxed(&ReleaseToOsIntervalMs, Interval);
       return true;
     }
     if (O == Option::MaxCacheEntriesCount) {
       const u32 MaxCount = static_cast<u32>(Value);
-      if (MaxCount > CacheConfig::EntriesArraySize)
+      if (MaxCount > Config::getEntriesArraySize())
         return false;
       atomic_store_relaxed(&MaxEntriesCount, MaxCount);
       return true;
@@ -384,7 +382,7 @@ template <typename Config> class MapAllocatorCache {
 
   void disableMemoryTagging() EXCLUDES(Mutex) {
     ScopedLock L(Mutex);
-    for (u32 I = 0; I != CacheConfig::QuarantineSize; ++I) {
+    for (u32 I = 0; I != Config::getQuarantineSize(); ++I) {
       if (Quarantine[I].isValid()) {
         MemMapT &MemMap = Quarantine[I].MemMap;
         MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
@@ -409,11 +407,11 @@ template <typename Config> class MapAllocatorCache {
 
 private:
   void empty() {
-    MemMapT MapInfo[CacheConfig::EntriesArraySize];
+    MemMapT MapInfo[Config::getEntriesArraySize()];
     uptr N = 0;
     {
       ScopedLock L(Mutex);
-      for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++) {
+      for (uptr I = 0; I < Config::getEntriesArraySize(); I++) {
         if (!Entries[I].isValid())
           continue;
         MapInfo[N] = Entries[I].MemMap;
@@ -446,9 +444,9 @@ template <typename Config> class MapAllocatorCache {
     if (!EntriesCount || OldestTime == 0 || OldestTime > Time)
       return;
     OldestTime = 0;
-    for (uptr I = 0; I < CacheConfig::QuarantineSize; I++)
+    for (uptr I = 0; I < Config::getQuarantineSize(); I++)
       releaseIfOlderThan(Quarantine[I], Time);
-    for (uptr I = 0; I < CacheConfig::EntriesArraySize; I++)
+    for (uptr I = 0; I < Config::getEntriesArraySize(); I++)
       releaseIfOlderThan(Entries[I], Time);
   }
 
@@ -463,8 +461,8 @@ template <typename Config> class MapAllocatorCache {
   u32 CallsToRetrieve GUARDED_BY(Mutex) = 0;
   u32 SuccessfulRetrieves GUARDED_BY(Mutex) = 0;
 
-  CachedBlock Entries[CacheConfig::EntriesArraySize] GUARDED_BY(Mutex) = {};
-  NonZeroLengthArray<CachedBlock, CacheConfig::QuarantineSize>
+  CachedBlock Entries[Config::getEntriesArraySize()] GUARDED_BY(Mutex) = {};
+  NonZeroLengthArray<CachedBlock, Config::getQuarantineSize()>
       Quarantine GUARDED_BY(Mutex) = {};
 };
 
@@ -533,7 +531,7 @@ template <typename Config> class MapAllocator {
   void getStats(ScopedString *Str);
 
 private:
-  typename Config::Secondary::template CacheT<Config> Cache;
+  typename Config::template CacheT<typename Config::CacheConfig> Cache;
 
   mutable HybridMutex Mutex;
   DoublyLinkedList<LargeBlock::Header> InUseBlocks GUARDED_BY(Mutex);
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 3dbd93cacefd68..46f17c642d0acd 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -189,7 +189,6 @@ struct TestConditionVariableConfig {
 #endif
     static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
     static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
-    static const bool UseConditionVariable = true;
 #if SCUDO_LINUX
     using ConditionVariableT = scudo::ConditionVariableLinux;
 #else
diff --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
index 18171511758a14..9e34bba25ee6ff 100644
--- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
@@ -9,6 +9,7 @@
 #include "tests/scudo_unit_test.h"
 
 #include "allocator_config.h"
+#include "allocator_config_wrapper.h"
 #include "condition_variable.h"
 #include "primary32.h"
 #include "primary64.h"
@@ -29,6 +30,9 @@
 
 template <typename SizeClassMapT> struct TestConfig1 {
   static const bool MaySupportMemoryTagging = false;
+  template <typename> using TSDRegistryT = void;
+  template <typename> using PrimaryT = void;
+  template <typename> using SecondaryT = void;
 
   struct Primary {
     using SizeClassMap = SizeClassMapT;
@@ -45,6 +49,9 @@ template <typename SizeClassMapT> struct TestConfig1 {
 
 template <typename SizeClassMapT> struct TestConfig2 {
   static const bool MaySupportMemoryTagging = false;
+  template <typename> using TSDRegistryT = void;
+  template <typename> using PrimaryT = void;
+  template <typename> using SecondaryT = void;
 
   struct Primary {
     using SizeClassMap = SizeClassMapT;
@@ -66,6 +73,9 @@ template <typename SizeClassMapT> struct TestConfig2 {
 
 template <typename SizeClassMapT> struct TestConfig3 {
   static const bool MaySupportMemoryTagging = true;
+  template <typename> using TSDRegistryT = void;
+  template <typename> using PrimaryT = void;
+  template <typename> using SecondaryT = void;
 
   struct Primary {
     using SizeClassMap = SizeClassMapT;
@@ -87,6 +97,9 @@ template <typename SizeClassMapT> struct TestConfig3 {
 
 template <typename SizeClassMapT> struct TestConfig4 {
   static const bool MaySupportMemoryTagging = true;
+  template <typename> using TSDRegistryT = void;
+  template <typename> using PrimaryT = void;
+  template <typename> using SecondaryT = void;
 
   struct Primary {
     using SizeClassMap = SizeClassMapT;
@@ -109,6 +122,9 @@ template <typename SizeClassMapT> struct TestConfig4 {
 // This is the only test config that enables the condition variable.
 template <typename SizeClassMapT> struct TestConfig5 {
   static const bool MaySupportMemoryTagging = true;
+  template <typename> using TSDRegistryT = void;
+  template <typename> using PrimaryT = void;
+  template <typename> using SecondaryT = void;
 
   struct Primary {
     using SizeClassMap = SizeClassMapT;
@@ -125,7 +141,6 @@ template <typename SizeClassMapT> struct TestConfig5 {
     typedef scudo::u32 CompactPtrT;
     static const bool EnableRandomOffset = true;
     static const scudo::uptr MapSizeIncrement = 1UL << 18;
-    static const bool UseConditionVariable = true;
 #if SCUDO_LINUX
     using ConditionVariableT = scudo::ConditionVariableLinux;
 #else
@@ -139,10 +154,12 @@ struct Config : public BaseConfig<SizeClassMapT> {};
 
 template <template <typename> class BaseConfig, typename SizeClassMapT>
 struct SizeClassAllocator
-    : public scudo::SizeClassAllocator64<Config<BaseConfig, SizeClassMapT>> {};
+    : public scudo::SizeClassAllocator64<
+          scudo::PrimaryConfig<Config<BaseConfig, SizeClassMapT>>> {};
 template <typename SizeClassMapT>
 struct SizeClassAllocator<TestConfig1, SizeClassMapT>
-    : public scudo::SizeClassAllocator32<Config<TestConfig1, SizeClassMapT>> {};
+    : public scudo::SizeClassAllocator32<
+          scudo::PrimaryConfig<Config<TestConfig1, SizeClassMapT>>> {};
 
 template <template <typename> class BaseConfig, typename SizeClassMapT>
 struct TestAllocator : public SizeClassAllocator<BaseConfig, SizeClassMapT> {
@@ -219,6 +236,9 @@ SCUDO_TYPED_TEST(ScudoPrimaryTest, BasicPrimary) {
 
 struct SmallRegionsConfig {
   static const bool MaySupportMemoryTagging = false;
+  template <typename> using TSDRegistryT = void;
+  template <typename> using PrimaryT = void;
+  template <typename> using SecondaryT = void;
 
   struct Primary {
     using SizeClassMap = scudo::DefaultSizeClassMap;
@@ -236,7 +256,8 @@ struct SmallRegionsConfig {
 // The 64-bit SizeClassAllocator can be easily OOM'd with small region sizes.
 // For the 32-bit one, it requires actually exhausting memory, so we skip it.
 TEST(ScudoPrimaryTest, Primary64OOM) {
-  using Primary = scudo::SizeClassAllocator64<SmallRegionsConfig>;
+  using Primary =
+      scudo::SizeClassAllocator64<scudo::PrimaryConfig<SmallRegionsConfig>>;
   using TransferBatch = Primary::TransferBatchT;
   Primary Allocator;
   Allocator.init(/*ReleaseToOsInterval=*/-1);
diff --git a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
index 18d2e187fa3ce2..8f0250e88ebf3a 100644
--- a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
@@ -10,6 +10,7 @@
 #include "tests/scudo_unit_test.h"
 
 #include "allocator_config.h"
+#include "allocator_config_wrapper.h"
 #include "secondary.h"
 
 #include <algorithm>
@@ -22,7 +23,8 @@
 #include <vector>
 
 template <typename Config> static scudo::Options getOptionsForConfig() {
-  if (!Config::MaySupportMemoryTagging || !scudo::archSupportsMemoryTagging() ||
+  if (!Config::getMaySupportMemoryTagging() ||
+      !scudo::archSupportsMemoryTagging() ||
       !scudo::systemSupportsMemoryTagging())
     return {};
   scudo::AtomicOptions AO;
@@ -31,8 +33,9 @@ template <typename Config> static scudo::Options getOptionsForConfig() {
 }
 
 template <typename Config> static void testSecondaryBasic(void) {
-  using SecondaryT = scudo::MapAllocator<Config>;
-  scudo::Options Options = getOptionsForConfig<Config>();
+  using SecondaryT = scudo::MapAllocator<scudo::SecondaryConfig<Config>>;
+  scudo::Options Options =
+      getOptionsForConfig<scudo::SecondaryConfig<Config>>();
 
   scudo::GlobalStats S;
   S.init();
@@ -84,6 +87,10 @@ template <typename Config> static void testSecondaryBasic(void) {
 
 struct NoCacheConfig {
   static const bool MaySupportMemoryTagging = false;
+  template <typename> using TSDRegistryT = void;
+  template <typename> using PrimaryT = void;
+  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+
   struct Secondary {
     template <typename Config>
     using CacheT = scudo::MapAllocatorNoCache<Config>;
@@ -92,6 +99,10 @@ struct NoCacheConfig {
 
 struct TestConfig {
   static const bool MaySupportMemoryTagging = false;
+  template <typename> using TSDRegistryT = void;
+  template <typename> using PrimaryT = void;
+  template <typename> using SecondaryT = void;
+
   struct Secondary {
     struct Cache {
       static const scudo::u32 EntriesArraySize = 128U;
@@ -114,7 +125,7 @@ TEST(ScudoSecondaryTest, SecondaryBasic) {
 
 struct MapAllocatorTest : public Test {
   using Config = scudo::DefaultConfig;
-  using LargeAllocator = scudo::MapAllocator<Config>;
+  using LargeAllocator = scudo::MapAllocator<scudo::SecondaryConfig<Config>>;
 
   void SetUp() override { Allocator->init(nullptr); }
 
@@ -122,7 +133,8 @@ struct MapAllocatorTest : public Test {
 
   std::unique_ptr<LargeAllocator> Allocator =
       std::make_unique<LargeAllocator>();
-  scudo::Options Options = getOptionsForConfig<Config>();
+  scudo::Options Options =
+      getOptionsForConfig<scudo::SecondaryConfig<Config>>();
 };
 
 // This exercises a variety of combinations of size and alignment for the



More information about the llvm-commits mailing list