[compiler-rt] 719ab73 - scudo: Make it thread-safe to set some runtime configuration flags.

Peter Collingbourne via llvm-commits <llvm-commits at lists.llvm.org>
Wed Sep 30 09:43:21 PDT 2020


Author: Peter Collingbourne
Date: 2020-09-30T09:42:45-07:00
New Revision: 719ab7309eb7b7b5d802273b0f1871d6cdb965b1

URL: https://github.com/llvm/llvm-project/commit/719ab7309eb7b7b5d802273b0f1871d6cdb965b1
DIFF: https://github.com/llvm/llvm-project/commit/719ab7309eb7b7b5d802273b0f1871d6cdb965b1.diff

LOG: scudo: Make it thread-safe to set some runtime configuration flags.

Move some of the flags previously stored in Options, as well as the
UseMemoryTagging flag previously kept in the primary allocator, into a
single atomic variable so that they can be updated while other threads
are running. Relaxed accesses are used because the only requirement is
that other threads eventually observe the new value.

The code is set up so that the variable is generally loaded once per
allocation function call, with the exception of some rarely used code
paths such as error handlers. The flag bits can generally stay in a
register for the duration of the allocation function, which means they
can be branched on with minimal overhead (e.g. TBZ on aarch64).
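
For illustration, a minimal standalone sketch of the pattern (this uses
std::atomic instead of scudo's atomic_u32 helpers, and the names are
simplified stand-ins for the real types added in options.h below):

  // Sketch only: flag bits packed into one word, updated with relaxed
  // read-modify-write operations and snapshotted once per call.
  #include <atomic>
  #include <cstdint>

  enum class OptionBit : uint32_t { MayReturnNull = 0, DeallocTypeMismatch = 1 };

  struct Options {
    uint32_t Val;
    bool get(OptionBit Opt) const {
      return Val & (1u << static_cast<uint32_t>(Opt));
    }
  };

  struct AtomicOptions {
    std::atomic<uint32_t> Val{0};
    Options load() const {
      return Options{Val.load(std::memory_order_relaxed)};
    }
    void set(OptionBit Opt) {
      Val.fetch_or(1u << static_cast<uint32_t>(Opt), std::memory_order_relaxed);
    }
    void clear(OptionBit Opt) {
      Val.fetch_and(~(1u << static_cast<uint32_t>(Opt)),
                    std::memory_order_relaxed);
    }
  };

  // An allocation-style function loads the word once and branches on the
  // bits; the snapshot can stay in a register for the rest of the call.
  bool mayReturnNull(const AtomicOptions &A) {
    Options Opts = A.load(); // single relaxed load per call
    return Opts.get(OptionBit::MayReturnNull);
  }

The real AtomicOptions additionally provides setFillContentsMode, which
packs a two-bit FillContentsMode field and updates it with a
compare-and-exchange loop, as shown in the diff below.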

Differential Revision: https://reviews.llvm.org/D88523

Added: 
    compiler-rt/lib/scudo/standalone/options.h

Modified: 
    compiler-rt/lib/scudo/standalone/atomic_helpers.h
    compiler-rt/lib/scudo/standalone/combined.h
    compiler-rt/lib/scudo/standalone/primary32.h
    compiler-rt/lib/scudo/standalone/primary64.h
    compiler-rt/lib/scudo/standalone/wrappers_c.inc

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
index 0946a3308172..d88f5d7be642 100644
--- a/compiler-rt/lib/scudo/standalone/atomic_helpers.h
+++ b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
@@ -89,6 +89,20 @@ inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
   return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
 }
 
+template <typename T>
+inline typename T::Type atomic_fetch_and(volatile T *A, typename T::Type V,
+                                         memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  return __atomic_fetch_and(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+inline typename T::Type atomic_fetch_or(volatile T *A, typename T::Type V,
+                                        memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  return __atomic_fetch_or(&A->ValDoNotUse, V, MO);
+}
+
 template <typename T>
 inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
                                         memory_order MO) {

diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index e39871dc4704..2a891e44579a 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -15,6 +15,7 @@
 #include "flags_parser.h"
 #include "local_cache.h"
 #include "memtag.h"
+#include "options.h"
 #include "quarantine.h"
 #include "report.h"
 #include "secondary.h"
@@ -144,16 +145,19 @@ class Allocator {
     reportUnrecognizedFlags();
 
     // Store some flags locally.
-    Options.MayReturnNull = getFlags()->may_return_null;
-    Options.FillContents =
-        getFlags()->zero_contents
-            ? ZeroFill
-            : (getFlags()->pattern_fill_contents ? PatternOrZeroFill : NoFill);
-    Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
-    Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
-    Options.TrackAllocationStacks = false;
-    Options.UseOddEvenTags = true;
-    Options.QuarantineMaxChunkSize =
+    if (getFlags()->may_return_null)
+      Primary.Options.set(OptionBit::MayReturnNull);
+    if (getFlags()->zero_contents)
+      Primary.Options.setFillContentsMode(ZeroFill);
+    else if (getFlags()->pattern_fill_contents)
+      Primary.Options.setFillContentsMode(PatternOrZeroFill);
+    if (getFlags()->dealloc_type_mismatch)
+      Primary.Options.set(OptionBit::DeallocTypeMismatch);
+    if (getFlags()->delete_size_mismatch)
+      Primary.Options.set(OptionBit::DeleteSizeMismatch);
+    Primary.Options.set(OptionBit::UseOddEvenTags);
+
+    QuarantineMaxChunkSize =
         static_cast<u32>(getFlags()->quarantine_max_chunk_size);
 
     Stats.initLinkerInitialized();
@@ -250,8 +254,8 @@ class Allocator {
 #endif
   }
 
-  uptr computeOddEvenMaskForPointerMaybe(uptr Ptr, uptr Size) {
-    if (!Options.UseOddEvenTags)
+  uptr computeOddEvenMaskForPointerMaybe(Options Options, uptr Ptr, uptr Size) {
+    if (!Options.get(OptionBit::UseOddEvenTags))
       return 0;
 
     // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
@@ -267,6 +271,7 @@ class Allocator {
                           uptr Alignment = MinAlignment,
                           bool ZeroContents = false) {
     initThreadMaybe();
+    Options Options = Primary.Options.load();
 
 #ifdef GWP_ASAN_HOOKS
     if (UNLIKELY(GuardedAlloc.shouldSample())) {
@@ -278,10 +283,10 @@ class Allocator {
     const FillContentsMode FillContents = ZeroContents ? ZeroFill
                                           : TSDRegistry.getDisableMemInit()
                                               ? NoFill
-                                              : Options.FillContents;
+                                              : Options.getFillContentsMode();
 
     if (UNLIKELY(Alignment > MaxAlignment)) {
-      if (Options.MayReturnNull)
+      if (Options.get(OptionBit::MayReturnNull))
         return nullptr;
       reportAlignmentTooBig(Alignment, MaxAlignment);
     }
@@ -300,7 +305,7 @@ class Allocator {
     // Takes care of extravagantly large sizes as well as integer overflows.
     static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
     if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
-      if (Options.MayReturnNull)
+      if (Options.get(OptionBit::MayReturnNull))
         return nullptr;
       reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
     }
@@ -336,7 +341,7 @@ class Allocator {
                                  FillContents);
 
     if (UNLIKELY(!Block)) {
-      if (Options.MayReturnNull)
+      if (Options.get(OptionBit::MayReturnNull))
         return nullptr;
       reportOutOfMemory(NeededSize);
     }
@@ -359,7 +364,7 @@ class Allocator {
       //
       // When memory tagging is enabled, zeroing the contents is done as part of
       // setting the tag.
-      if (UNLIKELY(useMemoryTagging())) {
+      if (UNLIKELY(useMemoryTagging(Options))) {
         uptr PrevUserPtr;
         Chunk::UnpackedHeader Header;
         const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
@@ -424,10 +429,10 @@ class Allocator {
           }
         } else {
           const uptr OddEvenMask =
-              computeOddEvenMaskForPointerMaybe(BlockUptr, BlockSize);
+              computeOddEvenMaskForPointerMaybe(Options, BlockUptr, BlockSize);
           TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
         }
-        storeAllocationStackMaybe(Ptr);
+        storeAllocationStackMaybe(Options, Ptr);
       } else if (UNLIKELY(FillContents != NoFill)) {
         // This condition is not necessarily unlikely, but since memset is
         // costly, we might as well mark it as such.
@@ -471,6 +476,7 @@ class Allocator {
     // the TLS destructors, ending up in initialized thread specific data never
     // being destroyed properly. Any other heap operation will do a full init.
     initThreadMaybe(/*MinimalInit=*/true);
+    Options Options = Primary.Options.load();
 
 #ifdef GWP_ASAN_HOOKS
     if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
@@ -494,7 +500,7 @@ class Allocator {
 
     if (UNLIKELY(Header.State != Chunk::State::Allocated))
       reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
-    if (Options.DeallocTypeMismatch) {
+    if (Options.get(OptionBit::DeallocTypeMismatch)) {
       if (Header.OriginOrWasZeroed != Origin) {
         // With the exception of memalign'd chunks, that can be still be free'd.
         if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
@@ -505,19 +511,20 @@ class Allocator {
     }
 
     const uptr Size = getSize(Ptr, &Header);
-    if (DeleteSize && Options.DeleteSizeMismatch) {
+    if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
       if (UNLIKELY(DeleteSize != Size))
         reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
     }
 
-    quarantineOrDeallocateChunk(Ptr, &Header, Size);
+    quarantineOrDeallocateChunk(Options, Ptr, &Header, Size);
   }
 
   void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
     initThreadMaybe();
+    Options Options = Primary.Options.load();
 
     if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
-      if (Options.MayReturnNull)
+      if (Options.get(OptionBit::MayReturnNull))
         return nullptr;
       reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
     }
@@ -552,7 +559,7 @@ class Allocator {
     // Pointer has to be allocated with a malloc-type function. Some
     // applications think that it is OK to realloc a memalign'ed pointer, which
     // will trigger this check. It really isn't.
-    if (Options.DeallocTypeMismatch) {
+    if (Options.get(OptionBit::DeallocTypeMismatch)) {
       if (UNLIKELY(OldHeader.OriginOrWasZeroed != Chunk::Origin::Malloc))
         reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
                                   OldHeader.OriginOrWasZeroed,
@@ -583,11 +590,11 @@ class Allocator {
                      : BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
             Chunk::SizeOrUnusedBytesMask;
         Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
-        if (UNLIKELY(ClassId && useMemoryTagging())) {
+        if (UNLIKELY(ClassId && useMemoryTagging(Options))) {
           resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
                             reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
                             BlockEnd);
-          storeAllocationStackMaybe(OldPtr);
+          storeAllocationStackMaybe(Options, OldPtr);
         }
         return OldTaggedPtr;
       }
@@ -601,7 +608,7 @@ class Allocator {
     if (NewPtr) {
       const uptr OldSize = getSize(OldPtr, &OldHeader);
       memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
-      quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
+      quarantineOrDeallocateChunk(Options, OldPtr, &OldHeader, OldSize);
     }
     return NewPtr;
   }
@@ -682,7 +689,7 @@ class Allocator {
       if (getChunkFromBlock(Block, &Chunk, &Header) &&
           Header.State == Chunk::State::Allocated) {
         uptr TaggedChunk = Chunk;
-        if (useMemoryTagging())
+        if (useMemoryTagging(Primary.Options.load()))
           TaggedChunk = loadTag(Chunk);
         Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
                  Arg);
@@ -697,7 +704,7 @@ class Allocator {
 
   bool canReturnNull() {
     initThreadMaybe();
-    return Options.MayReturnNull;
+    return Primary.Options.load().get(OptionBit::MayReturnNull);
   }
 
   bool setOption(Option O, sptr Value) {
@@ -711,9 +718,9 @@ class Allocator {
       // any particular chunk is cut in half. Therefore we use this tuning
       // setting to control whether odd/even tags are enabled.
       if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
-        Options.UseOddEvenTags = true;
+        Primary.Options.set(OptionBit::UseOddEvenTags);
       else if (Value == M_MEMTAG_TUNING_UAF)
-        Options.UseOddEvenTags = false;
+        Primary.Options.clear(OptionBit::UseOddEvenTags);
       return true;
     } else {
       // We leave it to the various sub-components to decide whether or not they
@@ -773,18 +780,26 @@ class Allocator {
            Header.State == Chunk::State::Allocated;
   }
 
-  bool useMemoryTagging() { return Primary.useMemoryTagging(); }
+  bool useMemoryTagging() const {
+    return useMemoryTagging(Primary.Options.load());
+  }
+  static bool useMemoryTagging(Options Options) {
+    return PrimaryT::useMemoryTagging(Options);
+  }
 
   void disableMemoryTagging() { Primary.disableMemoryTagging(); }
 
   void setTrackAllocationStacks(bool Track) {
     initThreadMaybe();
-    Options.TrackAllocationStacks = Track;
+    if (Track)
+      Primary.Options.set(OptionBit::TrackAllocationStacks);
+    else
+      Primary.Options.clear(OptionBit::TrackAllocationStacks);
   }
 
   void setFillContents(FillContentsMode FillContents) {
     initThreadMaybe();
-    Options.FillContents = FillContents;
+    Primary.Options.setFillContentsMode(FillContents);
   }
 
   const char *getStackDepotAddress() const {
@@ -951,16 +966,7 @@ class Allocator {
   static const uptr MaxTraceSize = 64;
 
   u32 Cookie;
-
-  struct {
-    u8 MayReturnNull : 1;              // may_return_null
-    FillContentsMode FillContents : 2; // zero_contents, pattern_fill_contents
-    u8 DeallocTypeMismatch : 1;        // dealloc_type_mismatch
-    u8 DeleteSizeMismatch : 1;         // delete_size_mismatch
-    u8 TrackAllocationStacks : 1;
-    u8 UseOddEvenTags : 1;
-    u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size
-  } Options;
+  u32 QuarantineMaxChunkSize;
 
   GlobalStats Stats;
   PrimaryT Primary;
@@ -1025,15 +1031,15 @@ class Allocator {
            reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
   }
 
-  void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header,
-                                   uptr Size) {
+  void quarantineOrDeallocateChunk(Options Options, void *Ptr,
+                                   Chunk::UnpackedHeader *Header, uptr Size) {
     Chunk::UnpackedHeader NewHeader = *Header;
-    if (UNLIKELY(NewHeader.ClassId && useMemoryTagging())) {
+    if (UNLIKELY(NewHeader.ClassId && useMemoryTagging(Options))) {
       u8 PrevTag = extractTag(loadTag(reinterpret_cast<uptr>(Ptr)));
       if (!TSDRegistry.getDisableMemInit()) {
         uptr TaggedBegin, TaggedEnd;
         const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
-            reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
+            Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
             SizeClassMap::getSizeByClassId(NewHeader.ClassId));
         // Exclude the previous tag so that immediate use after free is detected
         // 100% of the time.
@@ -1041,14 +1047,14 @@ class Allocator {
                      &TaggedEnd);
       }
       NewHeader.OriginOrWasZeroed = !TSDRegistry.getDisableMemInit();
-      storeDeallocationStackMaybe(Ptr, PrevTag);
+      storeDeallocationStackMaybe(Options, Ptr, PrevTag);
     }
     // If the quarantine is disabled, the actual size of a chunk is 0 or larger
     // than the maximum allowed, we return a chunk directly to the backend.
     // Logical Or can be short-circuited, which introduces unnecessary
     // conditional jumps, so use bitwise Or and let the compiler be clever.
-    const bool BypassQuarantine = !Quarantine.getCacheSize() | !Size |
-                                  (Size > Options.QuarantineMaxChunkSize);
+    const bool BypassQuarantine =
+        !Quarantine.getCacheSize() | !Size | (Size > QuarantineMaxChunkSize);
     if (BypassQuarantine) {
       NewHeader.State = Chunk::State::Available;
       Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
@@ -1089,16 +1095,17 @@ class Allocator {
     return Offset + Chunk::getHeaderSize();
   }
 
-  void storeAllocationStackMaybe(void *Ptr) {
-    if (!UNLIKELY(Options.TrackAllocationStacks))
+  void storeAllocationStackMaybe(Options Options, void *Ptr) {
+    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
       return;
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
     Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
     Ptr32[MemTagAllocationTidIndex] = getThreadID();
   }
 
-  void storeDeallocationStackMaybe(void *Ptr, uint8_t PrevTag) {
-    if (!UNLIKELY(Options.TrackAllocationStacks))
+  void storeDeallocationStackMaybe(Options Options, void *Ptr,
+                                   uint8_t PrevTag) {
+    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
       return;
 
     // Disable tag checks here so that we don't need to worry about zero sized

diff --git a/compiler-rt/lib/scudo/standalone/options.h b/compiler-rt/lib/scudo/standalone/options.h
new file mode 100644
index 000000000000..4f387a37f482
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/options.h
@@ -0,0 +1,72 @@
+//===-- options.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_OPTIONS_H_
+#define SCUDO_OPTIONS_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+
+namespace scudo {
+
+enum class OptionBit {
+  MayReturnNull,
+  FillContents0of2,
+  FillContents1of2,
+  DeallocTypeMismatch,
+  DeleteSizeMismatch,
+  TrackAllocationStacks,
+  UseOddEvenTags,
+  UseMemoryTagging,
+};
+
+struct Options {
+  u32 Val;
+
+  bool get(OptionBit Opt) const { return Val & (1U << static_cast<u32>(Opt)); }
+
+  FillContentsMode getFillContentsMode() const {
+    return static_cast<FillContentsMode>(
+        (Val >> static_cast<u32>(OptionBit::FillContents0of2)) & 3);
+  }
+};
+
+struct AtomicOptions {
+  atomic_u32 Val;
+
+public:
+  Options load() const {
+    return Options{atomic_load(&Val, memory_order_relaxed)};
+  }
+
+  void clear(OptionBit Opt) {
+    atomic_fetch_and(&Val, ~(1U << static_cast<u32>(Opt)),
+                     memory_order_relaxed);
+  }
+
+  void set(OptionBit Opt) {
+    atomic_fetch_or(&Val, 1U << static_cast<u32>(Opt), memory_order_relaxed);
+  }
+
+  void setFillContentsMode(FillContentsMode FillContents) {
+    while (1) {
+      u32 Opts = atomic_load(&Val, memory_order_relaxed);
+      u32 NewOpts = Opts;
+      NewOpts &= ~(3U << static_cast<u32>(OptionBit::FillContents0of2));
+      NewOpts |= static_cast<u32>(FillContents)
+                 << static_cast<u32>(OptionBit::FillContents0of2);
+      if (atomic_compare_exchange_strong(&Val, &Opts, NewOpts,
+                                         memory_order_relaxed))
+        break;
+    }
+  }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_OPTIONS_H_

diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 0a985fb67bea..a159a584c7cb 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -13,6 +13,7 @@
 #include "common.h"
 #include "list.h"
 #include "local_cache.h"
+#include "options.h"
 #include "release.h"
 #include "report.h"
 #include "stats.h"
@@ -206,7 +207,10 @@ class SizeClassAllocator32 {
     return TotalReleasedBytes;
   }
 
-  bool useMemoryTagging() { return false; }
+  static bool useMemoryTagging(Options Options) {
+    (void)Options;
+    return false;
+  }
   void disableMemoryTagging() {}
 
   const char *getRegionInfoArrayAddress() const { return nullptr; }
@@ -218,6 +222,8 @@ class SizeClassAllocator32 {
     return {};
   }
 
+  AtomicOptions Options;
+
 private:
   static const uptr NumClasses = SizeClassMap::NumClasses;
   static const uptr RegionSize = 1UL << RegionSizeLog;

diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 933b1ee7c967..1f7ac38cefed 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -14,6 +14,7 @@
 #include "list.h"
 #include "local_cache.h"
 #include "memtag.h"
+#include "options.h"
 #include "release.h"
 #include "stats.h"
 #include "string_utils.h"
@@ -93,8 +94,8 @@ class SizeClassAllocator64 {
     }
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
-    if (SupportsMemoryTagging)
-      UseMemoryTagging = systemSupportsMemoryTagging();
+    if (SupportsMemoryTagging && systemSupportsMemoryTagging())
+      Options.set(OptionBit::UseMemoryTagging);
   }
   void init(s32 ReleaseToOsInterval) {
     memset(this, 0, sizeof(*this));
@@ -207,10 +208,10 @@ class SizeClassAllocator64 {
     return TotalReleasedBytes;
   }
 
-  bool useMemoryTagging() const {
-    return SupportsMemoryTagging && UseMemoryTagging;
+  static bool useMemoryTagging(Options Options) {
+    return SupportsMemoryTagging && Options.get(OptionBit::UseMemoryTagging);
   }
-  void disableMemoryTagging() { UseMemoryTagging = false; }
+  void disableMemoryTagging() { Options.clear(OptionBit::UseMemoryTagging); }
 
   const char *getRegionInfoArrayAddress() const {
     return reinterpret_cast<const char *>(RegionInfoArray);
@@ -262,6 +263,8 @@ class SizeClassAllocator64 {
     return B;
   }
 
+  AtomicOptions Options;
+
 private:
   static const uptr RegionSize = 1UL << RegionSizeLog;
   static const uptr NumClasses = SizeClassMap::NumClasses;
@@ -306,7 +309,6 @@ class SizeClassAllocator64 {
   uptr PrimaryBase;
   MapPlatformData Data;
   atomic_s32 ReleaseToOsIntervalMs;
-  bool UseMemoryTagging;
   alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];
 
   RegionInfo *getRegionInfo(uptr ClassId) {
@@ -373,7 +375,7 @@ class SizeClassAllocator64 {
       if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser),
                         UserMapSize, "scudo:primary",
                         MAP_ALLOWNOMEM | MAP_RESIZABLE |
-                            (useMemoryTagging() ? MAP_MEMTAG : 0),
+                            (useMemoryTagging(Options.load()) ? MAP_MEMTAG : 0),
                         &Region->Data)))
         return nullptr;
       Region->MappedUser += UserMapSize;

diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
index 7386a0053a0f..9d640038d8e2 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -234,30 +234,26 @@ INTERFACE WEAK int SCUDO_PREFIX(malloc_info)(UNUSED int options, FILE *stream) {
 
 // Disable memory tagging for the heap. The caller must disable memory tag
 // checks globally (e.g. by clearing TCF0 on aarch64) before calling this
-// function, and may not re-enable them after calling the function. The program
-// must be single threaded at the point when the function is called.
+// function, and may not re-enable them after calling the function.
 INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() {
   SCUDO_ALLOCATOR.disableMemoryTagging();
 }
 
 // Sets whether scudo records stack traces and other metadata for allocations
 // and deallocations. This function only has an effect if the allocator and
-// hardware support memory tagging. The program must be single threaded at the
-// point when the function is called.
+// hardware support memory tagging.
 INTERFACE WEAK void
 SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) {
   SCUDO_ALLOCATOR.setTrackAllocationStacks(track);
 }
 
-// Sets whether scudo zero-initializes all allocated memory. The program must
-// be single threaded at the point when the function is called.
+// Sets whether scudo zero-initializes all allocated memory.
 INTERFACE WEAK void SCUDO_PREFIX(malloc_set_zero_contents)(int zero_contents) {
   SCUDO_ALLOCATOR.setFillContents(zero_contents ? scudo::ZeroFill
                                                 : scudo::NoFill);
 }
 
-// Sets whether scudo pattern-initializes all allocated memory. The program must
-// be single threaded at the point when the function is called.
+// Sets whether scudo pattern-initializes all allocated memory.
 INTERFACE WEAK void
 SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) {
   SCUDO_ALLOCATOR.setFillContents(


        

