[compiler-rt] 78f786d - Revert "Delete sanitizer_common-based ('old') scudo: o7"

Krasimir Georgiev via llvm-commits llvm-commits at lists.llvm.org
Tue Dec 13 07:04:20 PST 2022


Author: Krasimir Georgiev
Date: 2022-12-13T16:03:11+01:00
New Revision: 78f786d02a464ae69594ee8f678e4cee495642f5

URL: https://github.com/llvm/llvm-project/commit/78f786d02a464ae69594ee8f678e4cee495642f5
DIFF: https://github.com/llvm/llvm-project/commit/78f786d02a464ae69594ee8f678e4cee495642f5.diff

LOG: Revert "Delete sanitizer_common-based ('old') scudo: o7"

This reverts commit 512a98e7184e5d48cefbe39da049af3b08ad3919.
We'll need some time to migrate some internal usages off this.
Will sync up with @hctim and @vitalybuka directly.

Added: 
    compiler-rt/lib/scudo/CMakeLists.txt
    compiler-rt/lib/scudo/scudo_allocator.cpp
    compiler-rt/lib/scudo/scudo_allocator.h
    compiler-rt/lib/scudo/scudo_allocator_combined.h
    compiler-rt/lib/scudo/scudo_allocator_secondary.h
    compiler-rt/lib/scudo/scudo_crc32.cpp
    compiler-rt/lib/scudo/scudo_crc32.h
    compiler-rt/lib/scudo/scudo_errors.cpp
    compiler-rt/lib/scudo/scudo_errors.h
    compiler-rt/lib/scudo/scudo_flags.cpp
    compiler-rt/lib/scudo/scudo_flags.h
    compiler-rt/lib/scudo/scudo_flags.inc
    compiler-rt/lib/scudo/scudo_interface_internal.h
    compiler-rt/lib/scudo/scudo_malloc.cpp
    compiler-rt/lib/scudo/scudo_new_delete.cpp
    compiler-rt/lib/scudo/scudo_platform.h
    compiler-rt/lib/scudo/scudo_termination.cpp
    compiler-rt/lib/scudo/scudo_tsd.h
    compiler-rt/lib/scudo/scudo_tsd_exclusive.cpp
    compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
    compiler-rt/lib/scudo/scudo_tsd_shared.cpp
    compiler-rt/lib/scudo/scudo_tsd_shared.inc
    compiler-rt/lib/scudo/scudo_utils.cpp
    compiler-rt/lib/scudo/scudo_utils.h

Modified: 
    

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/scudo/CMakeLists.txt b/compiler-rt/lib/scudo/CMakeLists.txt
new file mode 100644
index 0000000000000..c75ba2540afc4
--- /dev/null
+++ b/compiler-rt/lib/scudo/CMakeLists.txt
@@ -0,0 +1,160 @@
+add_compiler_rt_component(scudo)
+
+include_directories(..)
+
+set(SCUDO_CFLAGS ${SANITIZER_COMMON_CFLAGS})
+# SANITIZER_COMMON_CFLAGS contains -fno-builtin, but we actually want builtins!
+list(APPEND SCUDO_CFLAGS -fbuiltin)
+append_rtti_flag(OFF SCUDO_CFLAGS)
+
+# Too many existing bugs, needs cleanup.
+append_list_if(COMPILER_RT_HAS_WNO_FORMAT -Wno-format SCUDO_CFLAGS)
+
+set(SCUDO_MINIMAL_DYNAMIC_LIBS ${SANITIZER_COMMON_LINK_LIBS})
+append_list_if(COMPILER_RT_HAS_LIBDL dl SCUDO_MINIMAL_DYNAMIC_LIBS)
+append_list_if(COMPILER_RT_HAS_LIBRT rt SCUDO_MINIMAL_DYNAMIC_LIBS)
+append_list_if(COMPILER_RT_HAS_LIBPTHREAD pthread SCUDO_MINIMAL_DYNAMIC_LIBS)
+append_list_if(COMPILER_RT_HAS_LIBLOG log SCUDO_MINIMAL_DYNAMIC_LIBS)
+append_list_if(COMPILER_RT_HAS_OMIT_FRAME_POINTER_FLAG -fno-omit-frame-pointer
+               SCUDO_CFLAGS)
+
+set(SCUDO_DYNAMIC_LINK_FLAGS ${SANITIZER_COMMON_LINK_FLAGS})
+# Use gc-sections by default to avoid unused code being pulled in.
+list(APPEND SCUDO_DYNAMIC_LINK_FLAGS -Wl,--gc-sections)
+
+if(ANDROID)
+# Put most Sanitizer shared libraries in the global group. For more details, see
+# android-changes-for-ndk-developers.md#changes-to-library-search-order
+  if (COMPILER_RT_HAS_Z_GLOBAL)
+    list(APPEND SCUDO_DYNAMIC_LINK_FLAGS -Wl,-z,global)
+  endif()
+endif()
+
+# The minimal Scudo runtime does not include the UBSan runtime.
+set(SCUDO_MINIMAL_OBJECT_LIBS
+  RTSanitizerCommonNoTermination
+  RTSanitizerCommonLibc
+  RTInterception)
+
+if (COMPILER_RT_HAS_GWP_ASAN)
+  list(APPEND SCUDO_MINIMAL_OBJECT_LIBS
+       RTGwpAsan RTGwpAsanOptionsParser RTGwpAsanBacktraceLibc
+       RTGwpAsanSegvHandler)
+  list(APPEND SCUDO_CFLAGS -DGWP_ASAN_HOOKS)
+endif()
+
+set(SCUDO_OBJECT_LIBS ${SCUDO_MINIMAL_OBJECT_LIBS})
+set(SCUDO_DYNAMIC_LIBS ${SCUDO_MINIMAL_DYNAMIC_LIBS})
+
+if (FUCHSIA)
+  list(APPEND SCUDO_CFLAGS -nostdinc++)
+  list(APPEND SCUDO_DYNAMIC_LINK_FLAGS -nostdlib++)
+else()
+  list(APPEND SCUDO_DYNAMIC_LIBS
+    ${COMPILER_RT_UNWINDER_LINK_LIBS}
+    ${SANITIZER_CXX_ABI_LIBRARIES})
+  list(APPEND SCUDO_OBJECT_LIBS
+    RTSanitizerCommonCoverage
+    RTSanitizerCommonSymbolizer
+    RTUbsan)
+endif()
+
+set(SCUDO_SOURCES
+  scudo_allocator.cpp
+  scudo_crc32.cpp
+  scudo_errors.cpp
+  scudo_flags.cpp
+  scudo_malloc.cpp
+  scudo_termination.cpp
+  scudo_tsd_exclusive.cpp
+  scudo_tsd_shared.cpp
+  scudo_utils.cpp)
+
+set(SCUDO_CXX_SOURCES
+  scudo_new_delete.cpp)
+
+set(SCUDO_HEADERS
+  scudo_allocator.h
+  scudo_allocator_combined.h
+  scudo_allocator_secondary.h
+  scudo_crc32.h
+  scudo_errors.h
+  scudo_flags.h
+  scudo_flags.inc
+  scudo_interface_internal.h
+  scudo_platform.h
+  scudo_tsd.h
+  scudo_tsd_exclusive.inc
+  scudo_tsd_shared.inc
+  scudo_utils.h)
+
+# Enable the necessary instruction set for scudo_crc32.cpp, if available.
+# Newer compiler versions use -mcrc32 rather than -msse4.2.
+if (COMPILER_RT_HAS_MCRC32_FLAG)
+  set_source_files_properties(scudo_crc32.cpp PROPERTIES COMPILE_FLAGS -mcrc32)
+elseif (COMPILER_RT_HAS_MSSE4_2_FLAG)
+  set_source_files_properties(scudo_crc32.cpp PROPERTIES COMPILE_FLAGS -msse4.2)
+endif()
+
+# Enable the AArch64 CRC32 feature for scudo_crc32.cpp, if available.
+# Note that it is enabled by default starting with armv8.1-a.
+if (COMPILER_RT_HAS_MCRC_FLAG)
+  set_source_files_properties(scudo_crc32.cpp PROPERTIES COMPILE_FLAGS -mcrc)
+endif()
+
+if(COMPILER_RT_HAS_SCUDO)
+  add_compiler_rt_runtime(clang_rt.scudo_minimal
+    STATIC
+    ARCHS ${SCUDO_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_SOURCES}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    OBJECT_LIBS ${SCUDO_MINIMAL_OBJECT_LIBS}
+    CFLAGS ${SCUDO_CFLAGS}
+    PARENT_TARGET scudo)
+  add_compiler_rt_runtime(clang_rt.scudo_cxx_minimal
+    STATIC
+    ARCHS ${SCUDO_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_CXX_SOURCES}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    CFLAGS ${SCUDO_CFLAGS}
+    PARENT_TARGET scudo)
+
+  add_compiler_rt_runtime(clang_rt.scudo
+    STATIC
+    ARCHS ${SCUDO_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_SOURCES}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    OBJECT_LIBS ${SCUDO_OBJECT_LIBS}
+    CFLAGS ${SCUDO_CFLAGS}
+    PARENT_TARGET scudo)
+  add_compiler_rt_runtime(clang_rt.scudo_cxx
+    STATIC
+    ARCHS ${SCUDO_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_CXX_SOURCES}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    OBJECT_LIBS RTUbsan_cxx
+    CFLAGS ${SCUDO_CFLAGS}
+    PARENT_TARGET scudo)
+
+  add_compiler_rt_runtime(clang_rt.scudo_minimal
+    SHARED
+    ARCHS ${SCUDO_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_SOURCES} ${SCUDO_CXX_SOURCES}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    OBJECT_LIBS ${SCUDO_MINIMAL_OBJECT_LIBS}
+    CFLAGS ${SCUDO_CFLAGS}
+    LINK_FLAGS ${SCUDO_DYNAMIC_LINK_FLAGS}
+    LINK_LIBS ${SCUDO_MINIMAL_DYNAMIC_LIBS}
+    PARENT_TARGET scudo)
+
+  add_compiler_rt_runtime(clang_rt.scudo
+    SHARED
+    ARCHS ${SCUDO_SUPPORTED_ARCH}
+    SOURCES ${SCUDO_SOURCES} ${SCUDO_CXX_SOURCES}
+    ADDITIONAL_HEADERS ${SCUDO_HEADERS}
+    OBJECT_LIBS ${SCUDO_OBJECT_LIBS}
+    CFLAGS ${SCUDO_CFLAGS}
+    LINK_FLAGS ${SCUDO_DYNAMIC_LINK_FLAGS}
+    LINK_LIBS ${SCUDO_DYNAMIC_LIBS}
+    PARENT_TARGET scudo)
+endif()
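
The -mcrc32/-msse4.2/-mcrc flags above only allow scudo_crc32.cpp to emit the
hardware instructions; init() still calls hasHardwareCRC32() (defined in
scudo_utils.cpp) to verify that the host actually supports them before
switching HashAlgorithm to CRC32Hardware. A minimal sketch of what such a
runtime probe can look like, using cpuid on x86 and the Linux hwcaps on
AArch64; illustrative only, not the shipped implementation:

    #if defined(__x86_64__) || defined(__i386__)
    # include <cpuid.h>
    static bool probeHardwareCRC32() {
      unsigned Eax, Ebx, Ecx, Edx;
      if (!__get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx))
        return false;
      return (Ecx & (1u << 20)) != 0;  // Leaf 1, ECX bit 20: SSE4.2 (crc32).
    }
    #elif defined(__aarch64__) && defined(__linux__)
    # include <sys/auxv.h>
    # ifndef HWCAP_CRC32
    #  define HWCAP_CRC32 (1 << 7)  // AArch64 hwcap bit for the CRC32 extension.
    # endif
    static bool probeHardwareCRC32() {
      return (getauxval(AT_HWCAP) & HWCAP_CRC32) != 0;
    }
    #else
    static bool probeHardwareCRC32() { return false; }
    #endif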

diff --git a/compiler-rt/lib/scudo/scudo_allocator.cpp b/compiler-rt/lib/scudo/scudo_allocator.cpp
new file mode 100644
index 0000000000000..6a6b577ab0025
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_allocator.cpp
@@ -0,0 +1,831 @@
+//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo Hardened Allocator implementation.
+/// It uses the sanitizer_common allocator as a base and aims at mitigating
+/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
+/// header, a delayed free list, and additional sanity checks.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_allocator.h"
+#include "scudo_crc32.h"
+#include "scudo_errors.h"
+#include "scudo_flags.h"
+#include "scudo_interface_internal.h"
+#include "scudo_tsd.h"
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_quarantine.h"
+
+#ifdef GWP_ASAN_HOOKS
+# include "gwp_asan/guarded_pool_allocator.h"
+# include "gwp_asan/optional/backtrace.h"
+# include "gwp_asan/optional/options_parser.h"
+#include "gwp_asan/optional/segv_handler.h"
+#endif // GWP_ASAN_HOOKS
+
+#include <errno.h>
+#include <string.h>
+
+namespace __scudo {
+
+// Global static cookie, initialized at start-up.
+static u32 Cookie;
+
+// We default to software CRC32 if the alternatives are not supported, either
+// at compilation or at runtime.
+static atomic_uint8_t HashAlgorithm = { CRC32Software };
+
+inline u32 computeCRC32(u32 Crc, uptr Value, uptr *Array, uptr ArraySize) {
+  // If the hardware CRC32 feature is defined here, it was enabled everywhere,
+  // as opposed to only for scudo_crc32.cpp. This means that other hardware
+  // specific instructions were likely emitted elsewhere as well, and as a
+  // result there is no reason not to use it here.
+#if defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+  Crc = CRC32_INTRINSIC(Crc, Value);
+  for (uptr i = 0; i < ArraySize; i++)
+    Crc = CRC32_INTRINSIC(Crc, Array[i]);
+  return Crc;
+#else
+  if (atomic_load_relaxed(&HashAlgorithm) == CRC32Hardware) {
+    Crc = computeHardwareCRC32(Crc, Value);
+    for (uptr i = 0; i < ArraySize; i++)
+      Crc = computeHardwareCRC32(Crc, Array[i]);
+    return Crc;
+  }
+  Crc = computeSoftwareCRC32(Crc, Value);
+  for (uptr i = 0; i < ArraySize; i++)
+    Crc = computeSoftwareCRC32(Crc, Array[i]);
+  return Crc;
+#endif  // defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+}
+
+static BackendT &getBackend();
+
+namespace Chunk {
+  static inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
+    return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
+        getHeaderSize());
+  }
+  static inline
+  const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
+    return reinterpret_cast<const AtomicPackedHeader *>(
+        reinterpret_cast<uptr>(Ptr) - getHeaderSize());
+  }
+
+  static inline bool isAligned(const void *Ptr) {
+    return IsAligned(reinterpret_cast<uptr>(Ptr), MinAlignment);
+  }
+
+  // We can't use the offset member of the chunk itself, as we would double
+  // fetch it without any guarantee that it hasn't been tampered with in the
+  // meantime. To prevent this, we work with a local copy of the header.
+  static inline void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
+    return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
+        getHeaderSize() - (Header->Offset << MinAlignmentLog));
+  }
+
+  // Returns the usable size for a chunk, meaning the number of bytes from
+  // the beginning of the user data to the end of the backend allocated chunk.
+  static inline uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
+    const uptr ClassId = Header->ClassId;
+    if (ClassId)
+      return PrimaryT::ClassIdToSize(ClassId) - getHeaderSize() -
+          (Header->Offset << MinAlignmentLog);
+    return SecondaryT::GetActuallyAllocatedSize(
+        getBackendPtr(Ptr, Header)) - getHeaderSize();
+  }
+
+  // Returns the size the user requested when allocating the chunk.
+  static inline uptr getSize(const void *Ptr, UnpackedHeader *Header) {
+    const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
+    if (Header->ClassId)
+      return SizeOrUnusedBytes;
+    return SecondaryT::GetActuallyAllocatedSize(
+        getBackendPtr(Ptr, Header)) - getHeaderSize() - SizeOrUnusedBytes;
+  }
+
+  // Compute the checksum of the chunk pointer and its header.
+  static inline u16 computeChecksum(const void *Ptr, UnpackedHeader *Header) {
+    UnpackedHeader ZeroChecksumHeader = *Header;
+    ZeroChecksumHeader.Checksum = 0;
+    uptr HeaderHolder[sizeof(UnpackedHeader) / sizeof(uptr)];
+    memcpy(&HeaderHolder, &ZeroChecksumHeader, sizeof(HeaderHolder));
+    const u32 Crc = computeCRC32(Cookie, reinterpret_cast<uptr>(Ptr),
+                                 HeaderHolder, ARRAY_SIZE(HeaderHolder));
+    return static_cast<u16>(Crc);
+  }
+
+  // Checks the validity of a chunk by verifying its checksum. It does not
+  // terminate the process in the event of an invalid chunk.
+  static inline bool isValid(const void *Ptr) {
+    PackedHeader NewPackedHeader =
+        atomic_load_relaxed(getConstAtomicHeader(Ptr));
+    UnpackedHeader NewUnpackedHeader =
+        bit_cast<UnpackedHeader>(NewPackedHeader);
+    return (NewUnpackedHeader.Checksum ==
+            computeChecksum(Ptr, &NewUnpackedHeader));
+  }
+
+  // Ensure that ChunkAvailable is 0, so that if a 0 checksum is ever valid
+  // for a fully nulled out header, the chunk simply reads as available.
+  COMPILER_CHECK(ChunkAvailable == 0);
+
+  // Loads and unpacks the header, verifying the checksum in the process.
+  static inline
+  void loadHeader(const void *Ptr, UnpackedHeader *NewUnpackedHeader) {
+    PackedHeader NewPackedHeader =
+        atomic_load_relaxed(getConstAtomicHeader(Ptr));
+    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+    if (UNLIKELY(NewUnpackedHeader->Checksum !=
+        computeChecksum(Ptr, NewUnpackedHeader)))
+      dieWithMessage("corrupted chunk header at address %p\n", Ptr);
+  }
+
+  // Packs and stores the header, computing the checksum in the process.
+  static inline void storeHeader(void *Ptr, UnpackedHeader *NewUnpackedHeader) {
+    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
+    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+    atomic_store_relaxed(getAtomicHeader(Ptr), NewPackedHeader);
+  }
+
+  // Packs and stores the header, computing the checksum in the process. We
+  // compare the current header with the expected provided one to ensure that
+  // we are not being raced by a corruption occurring in another thread.
+  static inline void compareExchangeHeader(void *Ptr,
+                                           UnpackedHeader *NewUnpackedHeader,
+                                           UnpackedHeader *OldUnpackedHeader) {
+    NewUnpackedHeader->Checksum = computeChecksum(Ptr, NewUnpackedHeader);
+    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
+    if (UNLIKELY(!atomic_compare_exchange_strong(
+            getAtomicHeader(Ptr), &OldPackedHeader, NewPackedHeader,
+            memory_order_relaxed)))
+      dieWithMessage("race on chunk header at address %p\n", Ptr);
+  }
+}  // namespace Chunk
+
+struct QuarantineCallback {
+  explicit QuarantineCallback(AllocatorCacheT *Cache)
+    : Cache_(Cache) {}
+
+  // Chunk recycling function, returns a quarantined chunk to the backend,
+  // first making sure it hasn't been tampered with.
+  void Recycle(void *Ptr) {
+    UnpackedHeader Header;
+    Chunk::loadHeader(Ptr, &Header);
+    if (UNLIKELY(Header.State != ChunkQuarantine))
+      dieWithMessage("invalid chunk state when recycling address %p\n", Ptr);
+    UnpackedHeader NewHeader = Header;
+    NewHeader.State = ChunkAvailable;
+    Chunk::compareExchangeHeader(Ptr, &NewHeader, &Header);
+    void *BackendPtr = Chunk::getBackendPtr(Ptr, &Header);
+    if (Header.ClassId)
+      getBackend().deallocatePrimary(Cache_, BackendPtr, Header.ClassId);
+    else
+      getBackend().deallocateSecondary(BackendPtr);
+  }
+
+  // Internal quarantine allocation and deallocation functions. We first check
+  // that the batches are indeed serviced by the Primary.
+  // TODO(kostyak): figure out the best way to protect the batches.
+  void *Allocate(uptr Size) {
+    const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
+    return getBackend().allocatePrimary(Cache_, BatchClassId);
+  }
+
+  void Deallocate(void *Ptr) {
+    const uptr BatchClassId = SizeClassMap::ClassID(sizeof(QuarantineBatch));
+    getBackend().deallocatePrimary(Cache_, Ptr, BatchClassId);
+  }
+
+  AllocatorCacheT *Cache_;
+  COMPILER_CHECK(sizeof(QuarantineBatch) < SizeClassMap::kMaxSize);
+};
+
+typedef Quarantine<QuarantineCallback, void> QuarantineT;
+typedef QuarantineT::Cache QuarantineCacheT;
+COMPILER_CHECK(sizeof(QuarantineCacheT) <=
+               sizeof(ScudoTSD::QuarantineCachePlaceHolder));
+
+QuarantineCacheT *getQuarantineCache(ScudoTSD *TSD) {
+  return reinterpret_cast<QuarantineCacheT *>(TSD->QuarantineCachePlaceHolder);
+}
+
+#ifdef GWP_ASAN_HOOKS
+static gwp_asan::GuardedPoolAllocator GuardedAlloc;
+#endif // GWP_ASAN_HOOKS
+
+struct Allocator {
+  static const uptr MaxAllowedMallocSize =
+      FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
+
+  BackendT Backend;
+  QuarantineT Quarantine;
+
+  u32 QuarantineChunksUpToSize;
+
+  bool DeallocationTypeMismatch;
+  bool ZeroContents;
+  bool DeleteSizeMismatch;
+
+  bool CheckRssLimit;
+  uptr HardRssLimitMb;
+  uptr SoftRssLimitMb;
+  atomic_uint8_t RssLimitExceeded;
+  atomic_uint64_t RssLastCheckedAtNS;
+
+  explicit Allocator(LinkerInitialized)
+    : Quarantine(LINKER_INITIALIZED) {}
+
+  NOINLINE void performSanityChecks();
+
+  void init() {
+    SanitizerToolName = "Scudo";
+    PrimaryAllocatorName = "ScudoPrimary";
+    SecondaryAllocatorName = "ScudoSecondary";
+
+    initFlags();
+
+    performSanityChecks();
+
+    // Check if hardware CRC32 is supported in the binary and by the platform,
+    // if so, opt for the CRC32 hardware version of the checksum.
+    if (&computeHardwareCRC32 && hasHardwareCRC32())
+      atomic_store_relaxed(&HashAlgorithm, CRC32Hardware);
+
+    SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+    Backend.init(common_flags()->allocator_release_to_os_interval_ms);
+    HardRssLimitMb = common_flags()->hard_rss_limit_mb;
+    SoftRssLimitMb = common_flags()->soft_rss_limit_mb;
+    Quarantine.Init(
+        static_cast<uptr>(getFlags()->QuarantineSizeKb) << 10,
+        static_cast<uptr>(getFlags()->ThreadLocalQuarantineSizeKb) << 10);
+    QuarantineChunksUpToSize = (Quarantine.GetCacheSize() == 0) ? 0 :
+        getFlags()->QuarantineChunksUpToSize;
+    DeallocationTypeMismatch = getFlags()->DeallocationTypeMismatch;
+    DeleteSizeMismatch = getFlags()->DeleteSizeMismatch;
+    ZeroContents = getFlags()->ZeroContents;
+
+    if (UNLIKELY(!GetRandom(reinterpret_cast<void *>(&Cookie), sizeof(Cookie),
+                            /*blocking=*/false))) {
+      Cookie = static_cast<u32>((NanoTime() >> 12) ^
+                                (reinterpret_cast<uptr>(this) >> 4));
+    }
+
+    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
+    if (CheckRssLimit)
+      atomic_store_relaxed(&RssLastCheckedAtNS, MonotonicNanoTime());
+  }
+
+  // Helper function that checks for a valid Scudo chunk. nullptr isn't one.
+  bool isValidPointer(const void *Ptr) {
+    initThreadMaybe();
+    if (UNLIKELY(!Ptr))
+      return false;
+    if (!Chunk::isAligned(Ptr))
+      return false;
+    return Chunk::isValid(Ptr);
+  }
+
+  NOINLINE bool isRssLimitExceeded();
+
+  // Allocates a chunk.
+  void *
+  allocate(uptr Size, uptr Alignment, AllocType Type,
+           bool ForceZeroContents = false) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+    initThreadMaybe();
+
+    if (UNLIKELY(Alignment > MaxAlignment)) {
+      if (AllocatorMayReturnNull())
+        return nullptr;
+      reportAllocationAlignmentTooBig(Alignment, MaxAlignment);
+    }
+    if (UNLIKELY(Alignment < MinAlignment))
+      Alignment = MinAlignment;
+
+#ifdef GWP_ASAN_HOOKS
+    if (UNLIKELY(GuardedAlloc.shouldSample())) {
+      if (void *Ptr = GuardedAlloc.allocate(Size, Alignment)) {
+        if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
+          __sanitizer_malloc_hook(Ptr, Size);
+        return Ptr;
+      }
+    }
+#endif // GWP_ASAN_HOOKS
+
+    const uptr NeededSize = RoundUpTo(Size ? Size : 1, MinAlignment) +
+        Chunk::getHeaderSize();
+    const uptr AlignedSize = (Alignment > MinAlignment) ?
+        NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
+    if (UNLIKELY(Size >= MaxAllowedMallocSize) ||
+        UNLIKELY(AlignedSize >= MaxAllowedMallocSize)) {
+      if (AllocatorMayReturnNull())
+        return nullptr;
+      reportAllocationSizeTooBig(Size, AlignedSize, MaxAllowedMallocSize);
+    }
+
+    if (CheckRssLimit && UNLIKELY(isRssLimitExceeded())) {
+      if (AllocatorMayReturnNull())
+        return nullptr;
+      reportRssLimitExceeded();
+    }
+
+    // Primary and Secondary backed allocations have a different treatment. We
+    // deal with alignment requirements of Primary serviced allocations here,
+    // but the Secondary will take care of its own alignment needs.
+    void *BackendPtr;
+    uptr BackendSize;
+    u8 ClassId;
+    if (PrimaryT::CanAllocate(AlignedSize, MinAlignment)) {
+      BackendSize = AlignedSize;
+      ClassId = SizeClassMap::ClassID(BackendSize);
+      bool UnlockRequired;
+      ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
+      BackendPtr = Backend.allocatePrimary(&TSD->Cache, ClassId);
+      if (UnlockRequired)
+        TSD->unlock();
+    } else {
+      BackendSize = NeededSize;
+      ClassId = 0;
+      BackendPtr = Backend.allocateSecondary(BackendSize, Alignment);
+    }
+    if (UNLIKELY(!BackendPtr)) {
+      SetAllocatorOutOfMemory();
+      if (AllocatorMayReturnNull())
+        return nullptr;
+      reportOutOfMemory(Size);
+    }
+
+    // If requested, we will zero out the entire contents of the returned chunk.
+    if ((ForceZeroContents || ZeroContents) && ClassId)
+      memset(BackendPtr, 0, PrimaryT::ClassIdToSize(ClassId));
+
+    UnpackedHeader Header = {};
+    uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
+    if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
+      // Since the Secondary takes care of alignment, a non-aligned pointer
+      // means it is from the Primary. It is also the only case where the offset
+      // field of the header would be non-zero.
+      DCHECK(ClassId);
+      const uptr AlignedUserPtr = RoundUpTo(UserPtr, Alignment);
+      Header.Offset = (AlignedUserPtr - UserPtr) >> MinAlignmentLog;
+      UserPtr = AlignedUserPtr;
+    }
+    DCHECK_LE(UserPtr + Size, reinterpret_cast<uptr>(BackendPtr) + BackendSize);
+    Header.State = ChunkAllocated;
+    Header.AllocType = Type;
+    if (ClassId) {
+      Header.ClassId = ClassId;
+      Header.SizeOrUnusedBytes = Size;
+    } else {
+      // The secondary fits the allocations to a page, so the amount of unused
+      // bytes is the difference between the end of the user allocation and the
+      // next page boundary.
+      const uptr PageSize = GetPageSizeCached();
+      const uptr TrailingBytes = (UserPtr + Size) & (PageSize - 1);
+      if (TrailingBytes)
+        Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
+    }
+    void *Ptr = reinterpret_cast<void *>(UserPtr);
+    Chunk::storeHeader(Ptr, &Header);
+    if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
+      __sanitizer_malloc_hook(Ptr, Size);
+    return Ptr;
+  }
+
+  // Place a chunk in the quarantine or directly deallocate it in the event of
+  // a zero-sized quarantine, or if the size of the chunk is greater than the
+  // quarantine chunk size threshold.
+  void quarantineOrDeallocateChunk(void *Ptr, UnpackedHeader *Header, uptr Size)
+      SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+    const bool BypassQuarantine = !Size || (Size > QuarantineChunksUpToSize);
+    if (BypassQuarantine) {
+      UnpackedHeader NewHeader = *Header;
+      NewHeader.State = ChunkAvailable;
+      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
+      void *BackendPtr = Chunk::getBackendPtr(Ptr, Header);
+      if (Header->ClassId) {
+        bool UnlockRequired;
+        ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
+        getBackend().deallocatePrimary(&TSD->Cache, BackendPtr,
+                                       Header->ClassId);
+        if (UnlockRequired)
+          TSD->unlock();
+      } else {
+        getBackend().deallocateSecondary(BackendPtr);
+      }
+    } else {
+      // If a small memory amount was allocated with a larger alignment, we want
+      // to take that into account. Otherwise the Quarantine would be filled
+      // with tiny chunks, taking a lot of VA memory. This is an approximation
+      // of the usable size, that allows us to not call
+      // GetActuallyAllocatedSize.
+      const uptr EstimatedSize = Size + (Header->Offset << MinAlignmentLog);
+      UnpackedHeader NewHeader = *Header;
+      NewHeader.State = ChunkQuarantine;
+      Chunk::compareExchangeHeader(Ptr, &NewHeader, Header);
+      bool UnlockRequired;
+      ScudoTSD *TSD = getTSDAndLock(&UnlockRequired);
+      Quarantine.Put(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache),
+                     Ptr, EstimatedSize);
+      if (UnlockRequired)
+        TSD->unlock();
+    }
+  }
+
+  // Deallocates a Chunk, which means either adding it to the quarantine or
+  // directly returning it to the backend if criteria are met.
+  void deallocate(void *Ptr, uptr DeleteSize, uptr DeleteAlignment,
+                  AllocType Type) {
+    // For a deallocation, we only ensure minimal initialization, meaning thread
+    // local data will be left uninitialized for now (when using ELF TLS). The
+    // fallback cache will be used instead. This is a workaround for a situation
+    // where the only heap operation performed in a thread would be a free past
+    // the TLS destructors, which would end up with initialized thread specific
+    // data never being destroyed properly. Any other heap operation will do a
+    // full init.
+    initThreadMaybe(/*MinimalInit=*/true);
+    if (SCUDO_CAN_USE_HOOKS && &__sanitizer_free_hook)
+      __sanitizer_free_hook(Ptr);
+    if (UNLIKELY(!Ptr))
+      return;
+
+#ifdef GWP_ASAN_HOOKS
+    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
+      GuardedAlloc.deallocate(Ptr);
+      return;
+    }
+#endif // GWP_ASAN_HOOKS
+
+    if (UNLIKELY(!Chunk::isAligned(Ptr)))
+      dieWithMessage("misaligned pointer when deallocating address %p\n", Ptr);
+    UnpackedHeader Header;
+    Chunk::loadHeader(Ptr, &Header);
+    if (UNLIKELY(Header.State != ChunkAllocated))
+      dieWithMessage("invalid chunk state when deallocating address %p\n", Ptr);
+    if (DeallocationTypeMismatch) {
+      // The deallocation type has to match the allocation one.
+      if (Header.AllocType != Type) {
+        // With the exception of memalign'd Chunks, which can still be free'd.
+        if (Header.AllocType != FromMemalign || Type != FromMalloc)
+          dieWithMessage("allocation type mismatch when deallocating address "
+                         "%p\n", Ptr);
+      }
+    }
+    const uptr Size = Chunk::getSize(Ptr, &Header);
+    if (DeleteSizeMismatch) {
+      if (DeleteSize && DeleteSize != Size)
+        dieWithMessage("invalid sized delete when deallocating address %p\n",
+                       Ptr);
+    }
+    (void)DeleteAlignment;  // TODO(kostyak): verify that the alignment matches.
+    quarantineOrDeallocateChunk(Ptr, &Header, Size);
+  }
+
+  // Reallocates a chunk. We can save on a new allocation if the new requested
+  // size still fits in the chunk.
+  void *reallocate(void *OldPtr, uptr NewSize) {
+    initThreadMaybe();
+
+#ifdef GWP_ASAN_HOOKS
+    if (UNLIKELY(GuardedAlloc.pointerIsMine(OldPtr))) {
+      size_t OldSize = GuardedAlloc.getSize(OldPtr);
+      void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
+      if (NewPtr)
+        memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
+      GuardedAlloc.deallocate(OldPtr);
+      return NewPtr;
+    }
+#endif // GWP_ASAN_HOOKS
+
+    if (UNLIKELY(!Chunk::isAligned(OldPtr)))
+      dieWithMessage("misaligned address when reallocating address %p\n",
+                     OldPtr);
+    UnpackedHeader OldHeader;
+    Chunk::loadHeader(OldPtr, &OldHeader);
+    if (UNLIKELY(OldHeader.State != ChunkAllocated))
+      dieWithMessage("invalid chunk state when reallocating address %p\n",
+                     OldPtr);
+    if (DeallocationTypeMismatch) {
+      if (UNLIKELY(OldHeader.AllocType != FromMalloc))
+        dieWithMessage("allocation type mismatch when reallocating address "
+                       "%p\n", OldPtr);
+    }
+    const uptr UsableSize = Chunk::getUsableSize(OldPtr, &OldHeader);
+    // The new size still fits in the current chunk, and the size difference
+    // is reasonable.
+    if (NewSize <= UsableSize &&
+        (UsableSize - NewSize) < (SizeClassMap::kMaxSize / 2)) {
+      UnpackedHeader NewHeader = OldHeader;
+      NewHeader.SizeOrUnusedBytes =
+          OldHeader.ClassId ? NewSize : UsableSize - NewSize;
+      Chunk::compareExchangeHeader(OldPtr, &NewHeader, &OldHeader);
+      return OldPtr;
+    }
+    // Otherwise, we have to allocate a new chunk and copy the contents of the
+    // old one.
+    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
+    if (NewPtr) {
+      const uptr OldSize = OldHeader.ClassId ? OldHeader.SizeOrUnusedBytes :
+          UsableSize - OldHeader.SizeOrUnusedBytes;
+      memcpy(NewPtr, OldPtr, Min(NewSize, UsableSize));
+      quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
+    }
+    return NewPtr;
+  }
+
+  // Helper function that returns the actual usable size of a chunk.
+  uptr getUsableSize(const void *Ptr) {
+    initThreadMaybe();
+    if (UNLIKELY(!Ptr))
+      return 0;
+
+#ifdef GWP_ASAN_HOOKS
+    if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr)))
+      return GuardedAlloc.getSize(Ptr);
+#endif // GWP_ASAN_HOOKS
+
+    UnpackedHeader Header;
+    Chunk::loadHeader(Ptr, &Header);
+    // Getting the usable size of a chunk only makes sense if it's allocated.
+    if (UNLIKELY(Header.State != ChunkAllocated))
+      dieWithMessage("invalid chunk state when sizing address %p\n", Ptr);
+    return Chunk::getUsableSize(Ptr, &Header);
+  }
+
+  void *calloc(uptr NMemB, uptr Size) {
+    initThreadMaybe();
+    if (UNLIKELY(CheckForCallocOverflow(NMemB, Size))) {
+      if (AllocatorMayReturnNull())
+        return nullptr;
+      reportCallocOverflow(NMemB, Size);
+    }
+    return allocate(NMemB * Size, MinAlignment, FromMalloc, true);
+  }
+
+  void commitBack(ScudoTSD *TSD) {
+    Quarantine.Drain(getQuarantineCache(TSD), QuarantineCallback(&TSD->Cache));
+    Backend.destroyCache(&TSD->Cache);
+  }
+
+  uptr getStats(AllocatorStat StatType) {
+    initThreadMaybe();
+    uptr stats[AllocatorStatCount];
+    Backend.getStats(stats);
+    return stats[StatType];
+  }
+
+  bool canReturnNull() {
+    initThreadMaybe();
+    return AllocatorMayReturnNull();
+  }
+
+  void setRssLimit(uptr LimitMb, bool HardLimit) {
+    if (HardLimit)
+      HardRssLimitMb = LimitMb;
+    else
+      SoftRssLimitMb = LimitMb;
+    CheckRssLimit = HardRssLimitMb || SoftRssLimitMb;
+  }
+
+  void printStats() {
+    initThreadMaybe();
+    Backend.printStats();
+  }
+};
+
+NOINLINE void Allocator::performSanityChecks() {
+  // Verify that the header offset field can hold the maximum offset. In the
+  // case of the Secondary allocator, it takes care of alignment and the
+  // offset will always be 0. In the case of the Primary, the worst case
+  // scenario happens in the last size class, when the backend allocation
+  // would already be aligned on the requested alignment, which would happen
+  // to be the maximum alignment that would fit in that size class. As a
+  // result, the maximum offset will be at most the maximum alignment for the
+  // last size class minus the header size, in multiples of MinAlignment.
+  UnpackedHeader Header = {};
+  const uptr MaxPrimaryAlignment =
+      1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
+  const uptr MaxOffset =
+      (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
+  Header.Offset = MaxOffset;
+  if (Header.Offset != MaxOffset)
+    dieWithMessage("maximum possible offset doesn't fit in header\n");
+  // Verify that we can fit the maximum size or amount of unused bytes in the
+  // header. Given that the Secondary fits the allocation to a page, the worst
+  // case scenario happens in the Primary. It will depend on the second to
+  // last and last class sizes, as well as the dynamic base for the Primary.
+  // The following is an over-approximation that works for our needs.
+  const uptr MaxSizeOrUnusedBytes = SizeClassMap::kMaxSize - 1;
+  Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
+  if (Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes)
+    dieWithMessage("maximum possible unused bytes doesn't fit in header\n");
+
+  const uptr LargestClassId = SizeClassMap::kLargestClassID;
+  Header.ClassId = LargestClassId;
+  if (Header.ClassId != LargestClassId)
+    dieWithMessage("largest class ID doesn't fit in header\n");
+}
+
+// Opportunistic RSS limit check. This will update the RSS limit status, if
+// it can, every 250ms, otherwise it will just return the current one.
+NOINLINE bool Allocator::isRssLimitExceeded() {
+  u64 LastCheck = atomic_load_relaxed(&RssLastCheckedAtNS);
+  const u64 CurrentCheck = MonotonicNanoTime();
+  if (LIKELY(CurrentCheck < LastCheck + (250ULL * 1000000ULL)))
+    return atomic_load_relaxed(&RssLimitExceeded);
+  if (!atomic_compare_exchange_weak(&RssLastCheckedAtNS, &LastCheck,
+                                    CurrentCheck, memory_order_relaxed))
+    return atomic_load_relaxed(&RssLimitExceeded);
+  // TODO(kostyak): We currently use sanitizer_common's GetRSS which reads the
+  //                RSS from /proc/self/statm by default. We might want to
+  //                call getrusage directly, even if it's less accurate.
+  const uptr CurrentRssMb = GetRSS() >> 20;
+  if (HardRssLimitMb && UNLIKELY(HardRssLimitMb < CurrentRssMb))
+    dieWithMessage("hard RSS limit exhausted (%zdMb vs %zdMb)\n",
+                   HardRssLimitMb, CurrentRssMb);
+  if (SoftRssLimitMb) {
+    if (atomic_load_relaxed(&RssLimitExceeded)) {
+      if (CurrentRssMb <= SoftRssLimitMb)
+        atomic_store_relaxed(&RssLimitExceeded, false);
+    } else {
+      if (CurrentRssMb > SoftRssLimitMb) {
+        atomic_store_relaxed(&RssLimitExceeded, true);
+        Printf("Scudo INFO: soft RSS limit exhausted (%zdMb vs %zdMb)\n",
+               SoftRssLimitMb, CurrentRssMb);
+      }
+    }
+  }
+  return atomic_load_relaxed(&RssLimitExceeded);
+}
+
+static Allocator Instance(LINKER_INITIALIZED);
+
+static BackendT &getBackend() {
+  return Instance.Backend;
+}
+
+void initScudo() {
+  Instance.init();
+#ifdef GWP_ASAN_HOOKS
+  gwp_asan::options::initOptions(__sanitizer::GetEnv("GWP_ASAN_OPTIONS"),
+                                 Printf);
+  gwp_asan::options::Options &Opts = gwp_asan::options::getOptions();
+  Opts.Backtrace = gwp_asan::backtrace::getBacktraceFunction();
+  GuardedAlloc.init(Opts);
+
+  if (Opts.InstallSignalHandlers)
+    gwp_asan::segv_handler::installSignalHandlers(
+        &GuardedAlloc, __sanitizer::Printf,
+        gwp_asan::backtrace::getPrintBacktraceFunction(),
+        gwp_asan::backtrace::getSegvBacktraceFunction());
+#endif // GWP_ASAN_HOOKS
+}
+
+void ScudoTSD::init() {
+  getBackend().initCache(&Cache);
+  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
+}
+
+void ScudoTSD::commitBack() {
+  Instance.commitBack(this);
+}
+
+void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type) {
+  if (Alignment && UNLIKELY(!IsPowerOfTwo(Alignment))) {
+    errno = EINVAL;
+    if (Instance.canReturnNull())
+      return nullptr;
+    reportAllocationAlignmentNotPowerOfTwo(Alignment);
+  }
+  return SetErrnoOnNull(Instance.allocate(Size, Alignment, Type));
+}
+
+void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type) {
+  Instance.deallocate(Ptr, Size, Alignment, Type);
+}
+
+void *scudoRealloc(void *Ptr, uptr Size) {
+  if (!Ptr)
+    return SetErrnoOnNull(Instance.allocate(Size, MinAlignment, FromMalloc));
+  if (Size == 0) {
+    Instance.deallocate(Ptr, 0, 0, FromMalloc);
+    return nullptr;
+  }
+  return SetErrnoOnNull(Instance.reallocate(Ptr, Size));
+}
+
+void *scudoCalloc(uptr NMemB, uptr Size) {
+  return SetErrnoOnNull(Instance.calloc(NMemB, Size));
+}
+
+void *scudoValloc(uptr Size) {
+  return SetErrnoOnNull(
+      Instance.allocate(Size, GetPageSizeCached(), FromMemalign));
+}
+
+void *scudoPvalloc(uptr Size) {
+  const uptr PageSize = GetPageSizeCached();
+  if (UNLIKELY(CheckForPvallocOverflow(Size, PageSize))) {
+    errno = ENOMEM;
+    if (Instance.canReturnNull())
+      return nullptr;
+    reportPvallocOverflow(Size);
+  }
+  // pvalloc(0) should allocate one page.
+  Size = Size ? RoundUpTo(Size, PageSize) : PageSize;
+  return SetErrnoOnNull(Instance.allocate(Size, PageSize, FromMemalign));
+}
+
+int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
+  if (UNLIKELY(!CheckPosixMemalignAlignment(Alignment))) {
+    if (!Instance.canReturnNull())
+      reportInvalidPosixMemalignAlignment(Alignment);
+    return EINVAL;
+  }
+  void *Ptr = Instance.allocate(Size, Alignment, FromMemalign);
+  if (UNLIKELY(!Ptr))
+    return ENOMEM;
+  *MemPtr = Ptr;
+  return 0;
+}
+
+void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
+  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(Alignment, Size))) {
+    errno = EINVAL;
+    if (Instance.canReturnNull())
+      return nullptr;
+    reportInvalidAlignedAllocAlignment(Size, Alignment);
+  }
+  return SetErrnoOnNull(Instance.allocate(Size, Alignment, FromMalloc));
+}
+
+uptr scudoMallocUsableSize(void *Ptr) {
+  return Instance.getUsableSize(Ptr);
+}
+
+}  // namespace __scudo
+
+using namespace __scudo;
+
+// MallocExtension helper functions
+
+uptr __sanitizer_get_current_allocated_bytes() {
+  return Instance.getStats(AllocatorStatAllocated);
+}
+
+uptr __sanitizer_get_heap_size() {
+  return Instance.getStats(AllocatorStatMapped);
+}
+
+uptr __sanitizer_get_free_bytes() {
+  return 1;
+}
+
+uptr __sanitizer_get_unmapped_bytes() {
+  return 1;
+}
+
+uptr __sanitizer_get_estimated_allocated_size(uptr Size) {
+  return Size;
+}
+
+int __sanitizer_get_ownership(const void *Ptr) {
+  return Instance.isValidPointer(Ptr);
+}
+
+uptr __sanitizer_get_allocated_size(const void *Ptr) {
+  return Instance.getUsableSize(Ptr);
+}
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_malloc_hook,
+                             void *Ptr, uptr Size) {
+  (void)Ptr;
+  (void)Size;
+}
+
+SANITIZER_INTERFACE_WEAK_DEF(void, __sanitizer_free_hook, void *Ptr) {
+  (void)Ptr;
+}
+#endif
+
+// Interface functions
+
+void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit) {
+  if (!SCUDO_CAN_USE_PUBLIC_INTERFACE)
+    return;
+  Instance.setRssLimit(LimitMb, !!HardLimit);
+}
+
+void __scudo_print_stats() {
+  Instance.printStats();
+}
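
To make the checksum scheme above concrete (computeChecksum, isValid,
loadHeader): the header's own Checksum field is zeroed, the global Cookie, the
user pointer and the header words are hashed together, and the low 16 bits are
kept. A self-contained toy of that flow; the field layout is simplified and an
XOR-fold stands in for computeCRC32, so this models the mechanism rather than
reproducing the shipped code:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct UnpackedHeader {
      uint64_t Checksum : 16;
      uint64_t State    : 2;   // available, allocated, or quarantined
      uint64_t Rest     : 46;  // the remaining fields, collapsed for brevity
    };
    static const uint32_t Cookie = 0xC0FFEE;  // random per process in scudo

    static uint16_t checksum(const void *Ptr, UnpackedHeader H) {
      H.Checksum = 0;  // the checksum field must not cover itself
      uint64_t Words;
      std::memcpy(&Words, &H, sizeof(Words));
      const uint64_t Mix = Cookie ^ reinterpret_cast<uintptr_t>(Ptr) ^ Words;
      // XOR-fold down to 16 bits; computeCRC32() plays this role for real.
      return static_cast<uint16_t>(Mix ^ (Mix >> 16) ^ (Mix >> 32) ^ (Mix >> 48));
    }

    int main() {
      int Block;
      UnpackedHeader H = {};
      H.State = 1;                                // ChunkAllocated
      H.Checksum = checksum(&Block, H);           // storeHeader()
      assert(H.Checksum == checksum(&Block, H));  // loadHeader() succeeds
      H.State = 2;                                // tamper with the state...
      assert(H.Checksum != checksum(&Block, H));  // ...and the check trips
      return 0;
    }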

diff --git a/compiler-rt/lib/scudo/scudo_allocator.h b/compiler-rt/lib/scudo/scudo_allocator.h
new file mode 100644
index 0000000000000..0efa5c5202961
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_allocator.h
@@ -0,0 +1,125 @@
+//===-- scudo_allocator.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_allocator.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_H_
+#define SCUDO_ALLOCATOR_H_
+
+#include "scudo_platform.h"
+
+namespace __scudo {
+
+enum AllocType : u8 {
+  FromMalloc    = 0,  // Memory block came from malloc, realloc, calloc, etc.
+  FromNew       = 1,  // Memory block came from operator new.
+  FromNewArray  = 2,  // Memory block came from operator new [].
+  FromMemalign  = 3,  // Memory block came from memalign, posix_memalign, etc.
+};
+
+enum ChunkState : u8 {
+  ChunkAvailable  = 0,
+  ChunkAllocated  = 1,
+  ChunkQuarantine = 2
+};
+
+// Our header requires 64 bits of storage. Having the offset saves us from
+// using functions such as GetBlockBegin, which are fairly costly. Our first
+// implementation used the MetaData as well, which offers the advantage of
+// being stored away from the chunk itself, but accessing it was equally
+// costly. The header will be atomically loaded and stored.
+typedef u64 PackedHeader;
+struct UnpackedHeader {
+  u64 Checksum          : 16;
+  u64 ClassId           : 8;
+  u64 SizeOrUnusedBytes : 20;  // Size for Primary backed allocations, amount of
+                               // unused bytes in the chunk for Secondary ones.
+  u64 State             : 2;   // available, allocated, or quarantined
+  u64 AllocType         : 2;   // malloc, new, new[], or memalign
+  u64 Offset            : 16;  // Offset from the beginning of the backend
+                               // allocation to the beginning of the chunk
+                               // itself, in multiples of MinAlignment. See
+                               // comment about its maximum value and in init().
+};
+
+typedef atomic_uint64_t AtomicPackedHeader;
+COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
+
+// Minimum alignment of 8 bytes for 32-bit, 16 for 64-bit
+const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4);
+const uptr MaxAlignmentLog = 24;  // 16 MB
+const uptr MinAlignment = 1 << MinAlignmentLog;
+const uptr MaxAlignment = 1 << MaxAlignmentLog;
+
+// constexpr version of __sanitizer::RoundUp without the extraneous CHECK.
+// This way we can use it in constexpr variables and function declarations.
+constexpr uptr RoundUpTo(uptr Size, uptr Boundary) {
+  return (Size + Boundary - 1) & ~(Boundary - 1);
+}
+
+namespace Chunk {
+  constexpr uptr getHeaderSize() {
+    return RoundUpTo(sizeof(PackedHeader), MinAlignment);
+  }
+}
+
+#if SANITIZER_CAN_USE_ALLOCATOR64
+const uptr AllocatorSpace = ~0ULL;
+struct AP64 {
+  static const uptr kSpaceBeg = AllocatorSpace;
+  static const uptr kSpaceSize = AllocatorSize;
+  static const uptr kMetadataSize = 0;
+  typedef __scudo::SizeClassMap SizeClassMap;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags =
+      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
+  using AddressSpaceView = LocalAddressSpaceView;
+};
+typedef SizeClassAllocator64<AP64> PrimaryT;
+#else
+struct AP32 {
+  static const uptr kSpaceBeg = 0;
+  static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+  static const uptr kMetadataSize = 0;
+  typedef __scudo::SizeClassMap SizeClassMap;
+  static const uptr kRegionSizeLog = RegionSizeLog;
+  using AddressSpaceView = LocalAddressSpaceView;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags =
+      SizeClassAllocator32FlagMasks::kRandomShuffleChunks |
+      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;
+};
+typedef SizeClassAllocator32<AP32> PrimaryT;
+#endif  // SANITIZER_CAN_USE_ALLOCATOR64
+
+#include "scudo_allocator_secondary.h"
+
+typedef LargeMmapAllocator SecondaryT;
+
+#include "scudo_allocator_combined.h"
+
+typedef CombinedAllocator BackendT;
+typedef CombinedAllocator::AllocatorCache AllocatorCacheT;
+
+void initScudo();
+
+void *scudoAllocate(uptr Size, uptr Alignment, AllocType Type);
+void scudoDeallocate(void *Ptr, uptr Size, uptr Alignment, AllocType Type);
+void *scudoRealloc(void *Ptr, uptr Size);
+void *scudoCalloc(uptr NMemB, uptr Size);
+void *scudoValloc(uptr Size);
+void *scudoPvalloc(uptr Size);
+int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size);
+void *scudoAlignedAlloc(uptr Alignment, uptr Size);
+uptr scudoMallocUsableSize(void *Ptr);
+
+}  // namespace __scudo
+
+#endif  // SCUDO_ALLOCATOR_H_
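
A worked example of the Offset field, using the 64-bit constants above
(MinAlignment = 16, header size RoundUpTo(8, 16) = 16): when a Primary-backed
allocation needs a larger alignment, the user pointer is bumped and the bump
is recorded in multiples of MinAlignment, which getBackendPtr() in
scudo_allocator.cpp later undoes. Toy addresses, runnable as-is:

    #include <cassert>
    #include <cstdint>

    int main() {
      const uint64_t MinAlignmentLog = 4;                // 64-bit value
      const uint64_t HeaderSize = 16;                    // Chunk::getHeaderSize()
      const uint64_t BackendPtr = 0x1000;                // what the Primary returned
      const uint64_t UserPtr = BackendPtr + HeaderSize;  // 0x1010
      const uint64_t Alignment = 64;                     // requested by the caller
      const uint64_t AlignedUserPtr =
          (UserPtr + Alignment - 1) & ~(Alignment - 1);   // 0x1040
      const uint64_t Offset =
          (AlignedUserPtr - UserPtr) >> MinAlignmentLog;  // 48 >> 4 == 3
      // getBackendPtr(): walk back over the header and the stored offset.
      assert(BackendPtr ==
             AlignedUserPtr - HeaderSize - (Offset << MinAlignmentLog));
      return 0;
    }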

diff --git a/compiler-rt/lib/scudo/scudo_allocator_combined.h b/compiler-rt/lib/scudo/scudo_allocator_combined.h
new file mode 100644
index 0000000000000..d61cc9ec1a528
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_allocator_combined.h
@@ -0,0 +1,75 @@
+//===-- scudo_allocator_combined.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo Combined Allocator, dispatches allocation & deallocation requests to
+/// the Primary or the Secondary backend allocators.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_COMBINED_H_
+#define SCUDO_ALLOCATOR_COMBINED_H_
+
+#ifndef SCUDO_ALLOCATOR_H_
+# error "This file must be included inside scudo_allocator.h."
+#endif
+
+class CombinedAllocator {
+ public:
+  using PrimaryAllocator = PrimaryT;
+  using SecondaryAllocator = SecondaryT;
+  using AllocatorCache = typename PrimaryAllocator::AllocatorCache;
+  void init(s32 ReleaseToOSIntervalMs) {
+    Primary.Init(ReleaseToOSIntervalMs);
+    Secondary.Init();
+    Stats.Init();
+  }
+
+  // Primary allocations are always MinAlignment aligned, and as such do not
+  // require an Alignment parameter.
+  void *allocatePrimary(AllocatorCache *Cache, uptr ClassId) {
+    return Cache->Allocate(&Primary, ClassId);
+  }
+
+  // Secondary allocations do not require a Cache, but do require an Alignment
+  // parameter.
+  void *allocateSecondary(uptr Size, uptr Alignment) {
+    return Secondary.Allocate(&Stats, Size, Alignment);
+  }
+
+  void deallocatePrimary(AllocatorCache *Cache, void *Ptr, uptr ClassId) {
+    Cache->Deallocate(&Primary, ClassId, Ptr);
+  }
+
+  void deallocateSecondary(void *Ptr) {
+    Secondary.Deallocate(&Stats, Ptr);
+  }
+
+  void initCache(AllocatorCache *Cache) {
+    Cache->Init(&Stats);
+  }
+
+  void destroyCache(AllocatorCache *Cache) {
+    Cache->Destroy(&Primary, &Stats);
+  }
+
+  void getStats(AllocatorStatCounters StatType) const {
+    Stats.Get(StatType);
+  }
+
+  void printStats() {
+    Primary.PrintStats();
+    Secondary.PrintStats();
+  }
+
+ private:
+  PrimaryAllocator Primary;
+  SecondaryAllocator Secondary;
+  AllocatorGlobalStats Stats;
+};
+
+#endif  // SCUDO_ALLOCATOR_COMBINED_H_
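
Which backend serves a request is decided by allocate() in
scudo_allocator.cpp: the user size is padded with the chunk header and, for
larger alignments, with worst-case padding, and the Primary takes the request
only if the padded size still fits a size class. A runnable toy of just that
decision, with a hypothetical 128 KiB ceiling standing in for
SizeClassMap::kMaxSize:

    #include <cstdint>
    #include <cstdio>

    static const uint64_t MinAlignment = 16, HeaderSize = 16;
    static const uint64_t PrimaryMaxSize = 128 << 10;  // hypothetical kMaxSize

    static const char *dispatch(uint64_t Size, uint64_t Alignment) {
      // NeededSize: user size rounded up to MinAlignment, plus the header.
      const uint64_t Needed =
          ((Size ? Size : 1) + MinAlignment - 1) / MinAlignment * MinAlignment +
          HeaderSize;
      // Extra alignment reserves worst-case padding up front.
      const uint64_t Aligned =
          Alignment > MinAlignment ? Needed + (Alignment - HeaderSize) : Needed;
      return Aligned < PrimaryMaxSize ? "Primary" : "Secondary";
    }

    int main() {
      printf("malloc(128)        -> %s\n", dispatch(128, MinAlignment));
      printf("malloc(1 MiB)      -> %s\n", dispatch(1 << 20, MinAlignment));
      printf("memalign(4096, 64) -> %s\n", dispatch(64, 4096));
      return 0;
    }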

diff --git a/compiler-rt/lib/scudo/scudo_allocator_secondary.h b/compiler-rt/lib/scudo/scudo_allocator_secondary.h
new file mode 100644
index 0000000000000..80198c4aebf5b
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_allocator_secondary.h
@@ -0,0 +1,192 @@
+//===-- scudo_allocator_secondary.h -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo Secondary Allocator.
+/// This services allocations that are too large to be serviced by the Primary
+/// Allocator. It is directly backed by the memory mapping functions of the
+/// operating system.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_SECONDARY_H_
+#define SCUDO_ALLOCATOR_SECONDARY_H_
+
+#ifndef SCUDO_ALLOCATOR_H_
+# error "This file must be included inside scudo_allocator.h."
+#endif
+
+// Secondary backed allocations are standalone chunks that contain extra
+// information stored in a LargeChunk::Header prior to the frontend's header.
+//
+// The secondary takes care of alignment requirements (so that it can release
+// unnecessary pages in the rare event of larger alignments), and as such must
+// know about the frontend's header size.
+//
+// Since Windows doesn't support partial releasing of a reserved memory region,
+// we have to keep track of both the reserved and the committed memory.
+//
+// The resulting chunk resembles the following:
+//
+//   +--------------------+
+//   | Guard page(s)      |
+//   +--------------------+
+//   | Unused space*      |
+//   +--------------------+
+//   | LargeChunk::Header |
+//   +--------------------+
+//   | {Unp,P}ackedHeader |
+//   +--------------------+
+//   | Data (aligned)     |
+//   +--------------------+
+//   | Unused space**     |
+//   +--------------------+
+//   | Guard page(s)      |
+//   +--------------------+
+
+namespace LargeChunk {
+struct Header {
+  ReservedAddressRange StoredRange;
+  uptr CommittedSize;
+  uptr Size;
+};
+constexpr uptr getHeaderSize() {
+  return RoundUpTo(sizeof(Header), MinAlignment);
+}
+static Header *getHeader(uptr Ptr) {
+  return reinterpret_cast<Header *>(Ptr - getHeaderSize());
+}
+static Header *getHeader(const void *Ptr) {
+  return getHeader(reinterpret_cast<uptr>(Ptr));
+}
+}  // namespace LargeChunk
+
+class LargeMmapAllocator {
+ public:
+  void Init() {
+    internal_memset(this, 0, sizeof(*this));
+  }
+
+  void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
+    const uptr UserSize = Size - Chunk::getHeaderSize();
+    // The Scudo frontend prevents us from allocating more than
+    // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
+    uptr ReservedSize = Size + LargeChunk::getHeaderSize();
+    if (UNLIKELY(Alignment > MinAlignment))
+      ReservedSize += Alignment;
+    const uptr PageSize = GetPageSizeCached();
+    ReservedSize = RoundUpTo(ReservedSize, PageSize);
+    // Account for 2 guard pages, one before and one after the chunk.
+    ReservedSize += 2 * PageSize;
+
+    ReservedAddressRange AddressRange;
+    uptr ReservedBeg = AddressRange.Init(ReservedSize, SecondaryAllocatorName);
+    if (UNLIKELY(ReservedBeg == ~static_cast<uptr>(0)))
+      return nullptr;
+    // A page-aligned pointer is assumed after that, so check it now.
+    DCHECK(IsAligned(ReservedBeg, PageSize));
+    uptr ReservedEnd = ReservedBeg + ReservedSize;
+    // The beginning of the user area for that allocation comes after the
+    // initial guard page, and both headers. This is the pointer that has to
+    // abide by alignment requirements.
+    uptr CommittedBeg = ReservedBeg + PageSize;
+    uptr UserBeg = CommittedBeg + HeadersSize;
+    uptr UserEnd = UserBeg + UserSize;
+    uptr CommittedEnd = RoundUpTo(UserEnd, PageSize);
+
+    // In the rare event of larger alignments, we will attempt to fit the mmap
+    // area better and unmap extraneous memory. This will also ensure that the
+    // offset and unused bytes field of the header stay small.
+    if (UNLIKELY(Alignment > MinAlignment)) {
+      if (!IsAligned(UserBeg, Alignment)) {
+        UserBeg = RoundUpTo(UserBeg, Alignment);
+        CommittedBeg = RoundDownTo(UserBeg - HeadersSize, PageSize);
+        const uptr NewReservedBeg = CommittedBeg - PageSize;
+        DCHECK_GE(NewReservedBeg, ReservedBeg);
+        if (!SANITIZER_WINDOWS && NewReservedBeg != ReservedBeg) {
+          AddressRange.Unmap(ReservedBeg, NewReservedBeg - ReservedBeg);
+          ReservedBeg = NewReservedBeg;
+        }
+        UserEnd = UserBeg + UserSize;
+        CommittedEnd = RoundUpTo(UserEnd, PageSize);
+      }
+      const uptr NewReservedEnd = CommittedEnd + PageSize;
+      DCHECK_LE(NewReservedEnd, ReservedEnd);
+      if (!SANITIZER_WINDOWS && NewReservedEnd != ReservedEnd) {
+        AddressRange.Unmap(NewReservedEnd, ReservedEnd - NewReservedEnd);
+        ReservedEnd = NewReservedEnd;
+      }
+    }
+
+    DCHECK_LE(UserEnd, CommittedEnd);
+    const uptr CommittedSize = CommittedEnd - CommittedBeg;
+    // Actually mmap the memory, preserving the guard pages on either side.
+    CHECK_EQ(CommittedBeg, AddressRange.Map(CommittedBeg, CommittedSize));
+    const uptr Ptr = UserBeg - Chunk::getHeaderSize();
+    LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
+    H->StoredRange = AddressRange;
+    H->Size = CommittedEnd - Ptr;
+    H->CommittedSize = CommittedSize;
+
+    // The primary adds the whole class size to the stats when allocating a
+    // chunk, so we will do something similar here. But we will not account for
+    // the guard pages.
+    {
+      SpinMutexLock l(&StatsMutex);
+      Stats->Add(AllocatorStatAllocated, CommittedSize);
+      Stats->Add(AllocatorStatMapped, CommittedSize);
+      AllocatedBytes += CommittedSize;
+      if (LargestSize < CommittedSize)
+        LargestSize = CommittedSize;
+      NumberOfAllocs++;
+    }
+
+    return reinterpret_cast<void *>(Ptr);
+  }
+
+  void Deallocate(AllocatorStats *Stats, void *Ptr) {
+    LargeChunk::Header *H = LargeChunk::getHeader(Ptr);
+    // Since we're about to unmap the entire region backing the
+    // ReservedAddressRange, copy it onto the stack first.
+    ReservedAddressRange AddressRange = H->StoredRange;
+    const uptr Size = H->CommittedSize;
+    {
+      SpinMutexLock l(&StatsMutex);
+      Stats->Sub(AllocatorStatAllocated, Size);
+      Stats->Sub(AllocatorStatMapped, Size);
+      FreedBytes += Size;
+      NumberOfFrees++;
+    }
+    AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
+                       AddressRange.size());
+  }
+
+  static uptr GetActuallyAllocatedSize(void *Ptr) {
+    return LargeChunk::getHeader(Ptr)->Size;
+  }
+
+  void PrintStats() {
+    Printf("Stats: LargeMmapAllocator: allocated %zd times (%zd K), "
+           "freed %zd times (%zd K), remains %zd (%zd K) max %zd M\n",
+           NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
+           FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
+           (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
+  }
+
+ private:
+  static constexpr uptr HeadersSize =
+      LargeChunk::getHeaderSize() + Chunk::getHeaderSize();
+
+  StaticSpinMutex StatsMutex;
+  u32 NumberOfAllocs;
+  u32 NumberOfFrees;
+  uptr AllocatedBytes;
+  uptr FreedBytes;
+  uptr LargestSize;
+};
+
+#endif  // SCUDO_ALLOCATOR_SECONDARY_H_
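
The reservation math in Allocate() can be replayed with toy numbers: one guard
page on each side, headers placed right after the front guard, and the
committed range rounded out to page boundaries. A runnable recomputation
assuming 4 KiB pages and a hypothetical 64-byte HeadersSize (the extra
trimming done for large alignments is left out):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    static uint64_t roundUpTo(uint64_t Size, uint64_t Boundary) {
      return (Size + Boundary - 1) & ~(Boundary - 1);
    }

    int main() {
      const uint64_t PageSize = 4096;
      const uint64_t HeadersSize = 64;     // hypothetical LargeChunk + frontend
      const uint64_t UserSize = 100000;
      const uint64_t ReservedSize =
          roundUpTo(UserSize + HeadersSize, PageSize) + 2 * PageSize;
      const uint64_t ReservedBeg = 0x10000000;  // say Init() returned this
      const uint64_t CommittedBeg = ReservedBeg + PageSize;  // skip front guard
      const uint64_t UserBeg = CommittedBeg + HeadersSize;
      const uint64_t CommittedEnd = roundUpTo(UserBeg + UserSize, PageSize);
      assert(CommittedEnd + PageSize <= ReservedBeg + ReservedSize);  // back guard
      printf("reserve %llu bytes, commit [%#llx, %#llx)\n",
             (unsigned long long)ReservedSize, (unsigned long long)CommittedBeg,
             (unsigned long long)CommittedEnd);
      return 0;
    }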

diff --git a/compiler-rt/lib/scudo/scudo_crc32.cpp b/compiler-rt/lib/scudo/scudo_crc32.cpp
new file mode 100644
index 0000000000000..137c44c5c1cda
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_crc32.cpp
@@ -0,0 +1,24 @@
+//===-- scudo_crc32.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// CRC32 function leveraging hardware specific instructions. This has to be
+/// kept separated to restrict the use of compiler specific flags to this file.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_crc32.h"
+
+namespace __scudo {
+
+#if defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+u32 computeHardwareCRC32(u32 Crc, uptr Data) {
+  return CRC32_INTRINSIC(Crc, Data);
+}
+#endif  // defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+
+}  // namespace __scudo
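
When neither the macros above nor the runtime check allow the hardware path,
computeCRC32() falls back to computeSoftwareCRC32(), the classic table-driven,
byte-at-a-time CRC-32 over the pointer-sized input; its definition follows the
CRC32Table in scudo_crc32.h. A sketch of that shape, assuming the standard
reflected-polynomial table that CRC32Table spells out:

    #include <cstdint>

    extern const uint32_t CRC32Table[256];  // the table from scudo_crc32.h

    static uint32_t softwareCRC32(uint32_t Crc, uint64_t Data) {
      // Fold in one byte per iteration, low byte first.
      for (unsigned I = 0; I < sizeof(Data); I++) {
        Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8);
        Data >>= 8;
      }
      return Crc;
    }

Note that the hardware path computes CRC-32C (the Castagnoli polynomial behind
the SSE4.2 and ARMv8 instructions) while this table is plain CRC-32; the two
need not agree, since HashAlgorithm is fixed once at init() and only internal
consistency of the checksums matters.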

diff --git a/compiler-rt/lib/scudo/scudo_crc32.h b/compiler-rt/lib/scudo/scudo_crc32.h
new file mode 100644
index 0000000000000..4314d30e929fe
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_crc32.h
@@ -0,0 +1,104 @@
+//===-- scudo_crc32.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo chunk header checksum related definitions.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CRC32_H_
+#define SCUDO_CRC32_H_
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+// Hardware CRC32 is supported at compilation via the following:
+// - for i386 & x86_64: -mcrc32 (earlier: -msse4.2)
+// - for ARM & AArch64: -march=armv8-a+crc or -mcrc
+// An additional check must be performed at runtime as well to make sure the
+// emitted instructions are valid on the target host.
+
+#if defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+# if defined(__CRC32__)
+// NB: clang has <crc32intrin.h> but GCC does not
+#  include <smmintrin.h>
+#  define CRC32_INTRINSIC FIRST_32_SECOND_64(__builtin_ia32_crc32si, __builtin_ia32_crc32di)
+# elif defined(__SSE4_2__)
+#  include <smmintrin.h>
+#  define CRC32_INTRINSIC FIRST_32_SECOND_64(_mm_crc32_u32, _mm_crc32_u64)
+# endif
+# ifdef __ARM_FEATURE_CRC32
+#  include <arm_acle.h>
+#  define CRC32_INTRINSIC FIRST_32_SECOND_64(__crc32cw, __crc32cd)
+# endif
+#endif  // defined(__CRC32__) || defined(__SSE4_2__) || defined(__ARM_FEATURE_CRC32)
+
+namespace __scudo {
+
+enum : u8 {
+  CRC32Software = 0,
+  CRC32Hardware = 1,
+};
+
+static const u32 CRC32Table[] = {
+  0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
+  0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+  0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
+  0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+  0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+  0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+  0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
+  0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+  0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
+  0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+  0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
+  0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+  0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
+  0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+  0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+  0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+  0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
+  0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+  0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
+  0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+  0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
+  0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+  0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
+  0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+  0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+  0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+  0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
+  0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+  0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
+  0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+  0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
+  0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+  0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
+  0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+  0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+  0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+  0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
+  0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+  0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
+  0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+  0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
+  0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+  0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+inline u32 computeSoftwareCRC32(u32 Crc, uptr Data) {
+  for (uptr i = 0; i < sizeof(Data); i++) {
+    Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8);
+    Data >>= 8;
+  }
+  return Crc;
+}
+
+SANITIZER_WEAK_ATTRIBUTE u32 computeHardwareCRC32(u32 Crc, uptr Data);
+
+}  // namespace __scudo
+
+#endif  // SCUDO_CRC32_H_
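
The table above is the standard reflected CRC-32 lookup table for polynomial
0xEDB88320, and computeSoftwareCRC32 folds a pointer-sized value into the
checksum one byte at a time. As a self-contained sketch of the same loop,
regenerating table entries on the fly instead of embedding all 256 of them
(names here are illustrative, not the runtime's):

    #include <cstddef>
    #include <cstdint>
    #include <cstdio>

    // Regenerates one entry of the table above: 8 rounds of the reflected
    // CRC-32 polynomial 0xEDB88320 (crcTableEntry(1) == 0x77073096, the
    // second entry of CRC32Table).
    static uint32_t crcTableEntry(uint32_t Index) {
      uint32_t C = Index;
      for (int K = 0; K < 8; K++)
        C = (C & 1) ? (0xEDB88320u ^ (C >> 1)) : (C >> 1);
      return C;
    }

    // Same folding loop as computeSoftwareCRC32: consume the pointer-sized
    // value one byte at a time.
    static uint32_t softwareCRC32(uint32_t Crc, uintptr_t Data) {
      for (size_t I = 0; I < sizeof(Data); I++) {
        Crc = crcTableEntry(static_cast<uint32_t>((Crc ^ Data) & 0xff)) ^
              (Crc >> 8);
        Data >>= 8;
      }
      return Crc;
    }

    int main() {
      printf("0x%08x\n", softwareCRC32(0, 0xdeadbeef));
      return 0;
    }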

diff  --git a/compiler-rt/lib/scudo/scudo_errors.cpp b/compiler-rt/lib/scudo/scudo_errors.cpp
new file mode 100644
index 0000000000000..4bea9ebc6ab05
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_errors.cpp
@@ -0,0 +1,77 @@
+//===-- scudo_errors.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Verbose termination functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_flags.h"
+
+namespace __scudo {
+
+void NORETURN reportCallocOverflow(uptr Count, uptr Size) {
+  dieWithMessage("calloc parameters overflow: count * size (%zd * %zd) cannot "
+      "be represented with type size_t\n", Count, Size);
+}
+
+void NORETURN reportPvallocOverflow(uptr Size) {
+  dieWithMessage("pvalloc parameters overflow: size 0x%zx rounded up to system "
+      "page size 0x%zx cannot be represented in type size_t\n", Size,
+      GetPageSizeCached());
+}
+
+void NORETURN reportAllocationAlignmentTooBig(uptr Alignment,
+                                              uptr MaxAlignment) {
+  dieWithMessage("invalid allocation alignment: %zd exceeds maximum supported "
+      "allocation of %zd\n", Alignment, MaxAlignment);
+}
+
+void NORETURN reportAllocationAlignmentNotPowerOfTwo(uptr Alignment) {
+  dieWithMessage("invalid allocation alignment: %zd, alignment must be a power "
+      "of two\n", Alignment);
+}
+
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment) {
+  dieWithMessage(
+      "invalid alignment requested in posix_memalign: %zd, alignment"
+      " must be a power of two and a multiple of sizeof(void *) == %zd\n",
+      Alignment, sizeof(void *));
+}
+
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Size, uptr Alignment) {
+#if SANITIZER_POSIX
+  dieWithMessage("invalid alignment requested in aligned_alloc: %zd, alignment "
+      "must be a power of two and the requested size 0x%zx must be a multiple "
+      "of alignment\n", Alignment, Size);
+#else
+  dieWithMessage("invalid alignment requested in aligned_alloc: %zd, the "
+      "requested size 0x%zx must be a multiple of alignment\n", Alignment,
+      Size);
+#endif
+}
+
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+                                         uptr MaxSize) {
+  dieWithMessage("requested allocation size 0x%zx (0x%zx after adjustments) "
+      "exceeds maximum supported size of 0x%zx\n", UserSize, TotalSize,
+      MaxSize);
+}
+
+void NORETURN reportRssLimitExceeded() {
+  dieWithMessage("specified RSS limit exceeded, currently set to "
+      "soft_rss_limit_mb=%zd\n", common_flags()->soft_rss_limit_mb);
+}
+
+void NORETURN reportOutOfMemory(uptr RequestedSize) {
+  dieWithMessage("allocator is out of memory trying to allocate 0x%zx bytes\n",
+                 RequestedSize);
+}
+
+}  // namespace __scudo
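
reportCallocOverflow above fires when count * size is not representable in
size_t. A hypothetical guard of the kind a calloc path performs before
multiplying; the name and the division-based form are illustrative, not the
allocator's actual code:

    #include <cstddef>
    #include <cstdint>

    // Overflow iff Count != 0 and Size > SIZE_MAX / Count; checking via
    // division avoids performing the overflowing multiplication itself.
    static bool callocWouldOverflow(size_t Count, size_t Size) {
      return Count != 0 && Size > SIZE_MAX / Count;
    }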

diff  --git a/compiler-rt/lib/scudo/scudo_errors.h b/compiler-rt/lib/scudo/scudo_errors.h
new file mode 100644
index 0000000000000..258695c2c02c7
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_errors.h
@@ -0,0 +1,34 @@
+//===-- scudo_errors.h ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_errors.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ERRORS_H_
+#define SCUDO_ERRORS_H_
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+namespace __scudo {
+
+void NORETURN reportCallocOverflow(uptr Count, uptr Size);
+void NORETURN reportPvallocOverflow(uptr Size);
+void NORETURN reportAllocationAlignmentTooBig(uptr Alignment,
+                                              uptr MaxAlignment);
+void NORETURN reportAllocationAlignmentNotPowerOfTwo(uptr Alignment);
+void NORETURN reportInvalidPosixMemalignAlignment(uptr Alignment);
+void NORETURN reportInvalidAlignedAllocAlignment(uptr Size, uptr Alignment);
+void NORETURN reportAllocationSizeTooBig(uptr UserSize, uptr TotalSize,
+                                         uptr MaxSize);
+void NORETURN reportRssLimitExceeded();
+void NORETURN reportOutOfMemory(uptr RequestedSize);
+
+}  // namespace __scudo
+
+#endif  // SCUDO_ERRORS_H_

diff  --git a/compiler-rt/lib/scudo/scudo_flags.cpp b/compiler-rt/lib/scudo/scudo_flags.cpp
new file mode 100644
index 0000000000000..c564e217b35bf
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_flags.cpp
@@ -0,0 +1,136 @@
+//===-- scudo_flags.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Hardened Allocator flag parsing logic.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_flags.h"
+#include "scudo_interface_internal.h"
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+
+namespace __scudo {
+
+static Flags ScudoFlags;  // Use via getFlags().
+
+void Flags::setDefaults() {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "scudo_flags.inc"
+#undef SCUDO_FLAG
+}
+
+static void RegisterScudoFlags(FlagParser *parser, Flags *f) {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) \
+  RegisterFlag(parser, #Name, Description, &f->Name);
+#include "scudo_flags.inc"
+#undef SCUDO_FLAG
+}
+
+static const char *getCompileDefinitionScudoDefaultOptions() {
+#ifdef SCUDO_DEFAULT_OPTIONS
+  return SANITIZER_STRINGIFY(SCUDO_DEFAULT_OPTIONS);
+#else
+  return "";
+#endif
+}
+
+static const char *getScudoDefaultOptions() {
+  return (&__scudo_default_options) ? __scudo_default_options() : "";
+}
+
+void initFlags() {
+  SetCommonFlagsDefaults();
+  {
+    CommonFlags cf;
+    cf.CopyFrom(*common_flags());
+    cf.exitcode = 1;
+    OverrideCommonFlags(cf);
+  }
+  Flags *f = getFlags();
+  f->setDefaults();
+
+  FlagParser ScudoParser;
+  RegisterScudoFlags(&ScudoParser, f);
+  RegisterCommonFlags(&ScudoParser);
+
+  // Override from compile definition.
+  ScudoParser.ParseString(getCompileDefinitionScudoDefaultOptions());
+
+  // Override from user-specified string.
+  ScudoParser.ParseString(getScudoDefaultOptions());
+
+  // Override from environment.
+  ScudoParser.ParseStringFromEnv("SCUDO_OPTIONS");
+
+  InitializeCommonFlags();
+
+  // Sanity checks and default settings for the Quarantine parameters.
+
+  if (f->QuarantineSizeMb >= 0) {
+    // Backward compatible logic if QuarantineSizeMb is set.
+    if (f->QuarantineSizeKb >= 0) {
+      dieWithMessage("ERROR: please use either QuarantineSizeMb (deprecated) "
+          "or QuarantineSizeKb, but not both\n");
+    }
+    if (f->QuarantineChunksUpToSize >= 0) {
+      dieWithMessage("ERROR: QuarantineChunksUpToSize cannot be used in "
+          " conjunction with the deprecated QuarantineSizeMb option\n");
+    }
+    // If everything is in order, update QuarantineSizeKb accordingly.
+    f->QuarantineSizeKb = f->QuarantineSizeMb * 1024;
+  } else {
+    // Otherwise proceed with the new options.
+    if (f->QuarantineSizeKb < 0) {
+      const int DefaultQuarantineSizeKb = FIRST_32_SECOND_64(64, 256);
+      f->QuarantineSizeKb = DefaultQuarantineSizeKb;
+    }
+    if (f->QuarantineChunksUpToSize < 0) {
+      const int DefaultQuarantineChunksUpToSize = FIRST_32_SECOND_64(512, 2048);
+      f->QuarantineChunksUpToSize = DefaultQuarantineChunksUpToSize;
+    }
+  }
+
+  // We enforce an upper limit for the chunk quarantine threshold of 4Mb.
+  if (f->QuarantineChunksUpToSize > (4 * 1024 * 1024)) {
+    dieWithMessage("ERROR: the chunk quarantine threshold is too large\n");
+  }
+
+  // We enforce an upper limit for the quarantine size of 32Mb.
+  if (f->QuarantineSizeKb > (32 * 1024)) {
+    dieWithMessage("ERROR: the quarantine size is too large\n");
+  }
+
+  if (f->ThreadLocalQuarantineSizeKb < 0) {
+    const int DefaultThreadLocalQuarantineSizeKb = FIRST_32_SECOND_64(16, 64);
+    f->ThreadLocalQuarantineSizeKb = DefaultThreadLocalQuarantineSizeKb;
+  }
+  // And an upper limit of 8Mb for the thread quarantine cache.
+  if (f->ThreadLocalQuarantineSizeKb > (8 * 1024)) {
+    dieWithMessage("ERROR: the per thread quarantine cache size is too "
+        "large\n");
+  }
+  if (f->ThreadLocalQuarantineSizeKb == 0 && f->QuarantineSizeKb > 0) {
+    dieWithMessage("ERROR: ThreadLocalQuarantineSizeKb can be set to 0 only "
+        "when QuarantineSizeKb is set to 0\n");
+  }
+}
+
+Flags *getFlags() {
+  return &ScudoFlags;
+}
+
+}  // namespace __scudo
+
+#if !SANITIZER_SUPPORTS_WEAK_HOOKS
+SANITIZER_INTERFACE_WEAK_DEF(const char*, __scudo_default_options, void) {
+  return "";
+}
+#endif
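
Note the parse order in initFlags above gives a fixed precedence: the
SCUDO_DEFAULT_OPTIONS compile definition is parsed first, then the
__scudo_default_options() hook, then the SCUDO_OPTIONS environment variable,
so later sources win. A program can supply the middle layer by defining the
hook declared in scudo_interface_internal.h; the flag values below are only
an example:

    // Strong definition overriding the weak default; picked up at
    // initialization through getScudoDefaultOptions(). SCUDO_OPTIONS in the
    // environment can still override these values.
    extern "C" const char *__scudo_default_options() {
      return "QuarantineSizeKb=256:ThreadLocalQuarantineSizeKb=64";
    }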

diff  --git a/compiler-rt/lib/scudo/scudo_flags.h b/compiler-rt/lib/scudo/scudo_flags.h
new file mode 100644
index 0000000000000..483c79621cbf4
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_flags.h
@@ -0,0 +1,32 @@
+//===-- scudo_flags.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_flags.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAGS_H_
+#define SCUDO_FLAGS_H_
+
+namespace __scudo {
+
+struct Flags {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "scudo_flags.inc"
+#undef SCUDO_FLAG
+
+  void setDefaults();
+};
+
+Flags *getFlags();
+
+void initFlags();
+
+}  // namespace __scudo
+
+#endif  // SCUDO_FLAGS_H_

diff  --git a/compiler-rt/lib/scudo/scudo_flags.inc b/compiler-rt/lib/scudo/scudo_flags.inc
new file mode 100644
index 0000000000000..c124738c1f3a7
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_flags.inc
@@ -0,0 +1,48 @@
+//===-- scudo_flags.inc -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Hardened Allocator runtime flags.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAG
+# error "Define SCUDO_FLAG prior to including this file!"
+#endif
+
+SCUDO_FLAG(int, QuarantineSizeMb, -1,
+           "Deprecated. Please use QuarantineSizeKb.")
+
+// Default value is set in scudo_flags.cpp based on architecture.
+SCUDO_FLAG(int, QuarantineSizeKb, -1,
+           "Size in KB of quarantine used to delay the actual deallocation of "
+           "chunks. Lower value may reduce memory usage but decrease the "
+           "effectiveness of the mitigation. Defaults to 64KB (32-bit) or "
+           "256KB (64-bit)")
+
+// Default value is set in scudo_flags.cpp based on architecture.
+SCUDO_FLAG(int, ThreadLocalQuarantineSizeKb, -1,
+          "Size in KB of per-thread cache used to offload the global "
+          "quarantine. Lower value may reduce memory usage but might increase "
+          "the contention on the global quarantine. Defaults to 16KB (32-bit) "
+          "or 64KB (64-bit)")
+
+// Default value is set in scudo_flags.cpp based on architecture.
+SCUDO_FLAG(int, QuarantineChunksUpToSize, -1,
+          "Size in bytes up to which chunks will be quarantined (if lower than"
+          "or equal to). Defaults to 256 (32-bit) or 2048 (64-bit)")
+
+// Disable the deallocation type check by default on Android, as it causes
+// too many issues with third party libraries.
+SCUDO_FLAG(bool, DeallocationTypeMismatch, !SANITIZER_ANDROID,
+          "Report errors on malloc/delete, new/free, new/delete[], etc.")
+
+SCUDO_FLAG(bool, DeleteSizeMismatch, true,
+           "Report errors on mismatch between size of new and delete.")
+
+SCUDO_FLAG(bool, ZeroContents, false,
+          "Zero chunk contents on allocation and deallocation.")

diff  --git a/compiler-rt/lib/scudo/scudo_interface_internal.h b/compiler-rt/lib/scudo/scudo_interface_internal.h
new file mode 100644
index 0000000000000..75c63aa6d4893
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_interface_internal.h
@@ -0,0 +1,32 @@
+//===-- scudo_interface_internal.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Private Scudo interface header.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_INTERFACE_INTERNAL_H_
+#define SCUDO_INTERFACE_INTERNAL_H_
+
+#include "sanitizer_common/sanitizer_internal_defs.h"
+
+using __sanitizer::uptr;
+using __sanitizer::s32;
+
+extern "C" {
+SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
+const char* __scudo_default_options();
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __scudo_set_rss_limit(uptr LimitMb, s32 HardLimit);
+
+SANITIZER_INTERFACE_ATTRIBUTE
+void __scudo_print_stats();
+}  // extern "C"
+
+#endif  // SCUDO_INTERFACE_INTERNAL_H_

diff  --git a/compiler-rt/lib/scudo/scudo_malloc.cpp b/compiler-rt/lib/scudo/scudo_malloc.cpp
new file mode 100644
index 0000000000000..a72b861e28ee4
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_malloc.cpp
@@ -0,0 +1,84 @@
+//===-- scudo_malloc.cpp ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Interceptors for malloc related functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_allocator.h"
+
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_platform_interceptors.h"
+
+#include <stddef.h>
+
+using namespace __scudo;
+
+extern "C" {
+INTERCEPTOR_ATTRIBUTE void free(void *ptr) {
+  scudoDeallocate(ptr, 0, 0, FromMalloc);
+}
+
+INTERCEPTOR_ATTRIBUTE void *malloc(size_t size) {
+  return scudoAllocate(size, 0, FromMalloc);
+}
+
+INTERCEPTOR_ATTRIBUTE void *realloc(void *ptr, size_t size) {
+  return scudoRealloc(ptr, size);
+}
+
+INTERCEPTOR_ATTRIBUTE void *calloc(size_t nmemb, size_t size) {
+  return scudoCalloc(nmemb, size);
+}
+
+INTERCEPTOR_ATTRIBUTE void *valloc(size_t size) {
+  return scudoValloc(size);
+}
+
+INTERCEPTOR_ATTRIBUTE
+int posix_memalign(void **memptr, size_t alignment, size_t size) {
+  return scudoPosixMemalign(memptr, alignment, size);
+}
+
+#if SANITIZER_INTERCEPT_CFREE
+INTERCEPTOR_ATTRIBUTE void cfree(void *ptr) ALIAS("free");
+#endif
+
+#if SANITIZER_INTERCEPT_MEMALIGN
+INTERCEPTOR_ATTRIBUTE void *memalign(size_t alignment, size_t size) {
+  return scudoAllocate(size, alignment, FromMemalign);
+}
+
+INTERCEPTOR_ATTRIBUTE
+void *__libc_memalign(size_t alignment, size_t size) ALIAS("memalign");
+#endif
+
+#if SANITIZER_INTERCEPT_PVALLOC
+INTERCEPTOR_ATTRIBUTE void *pvalloc(size_t size) {
+  return scudoPvalloc(size);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_ALIGNED_ALLOC
+INTERCEPTOR_ATTRIBUTE void *aligned_alloc(size_t alignment, size_t size) {
+  return scudoAlignedAlloc(alignment, size);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOC_USABLE_SIZE
+INTERCEPTOR_ATTRIBUTE size_t malloc_usable_size(void *ptr) {
+  return scudoMallocUsableSize(ptr);
+}
+#endif
+
+#if SANITIZER_INTERCEPT_MALLOPT_AND_MALLINFO
+INTERCEPTOR_ATTRIBUTE int mallopt(int cmd, int value) {
+  return 0;
+}
+#endif
+}  // extern "C"

diff  --git a/compiler-rt/lib/scudo/scudo_new_delete.cpp b/compiler-rt/lib/scudo/scudo_new_delete.cpp
new file mode 100644
index 0000000000000..03eef7f28bb92
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_new_delete.cpp
@@ -0,0 +1,107 @@
+//===-- scudo_new_delete.cpp ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Interceptors for operators new and delete.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_allocator.h"
+#include "scudo_errors.h"
+
+#include "interception/interception.h"
+
+#include <stddef.h>
+
+using namespace __scudo;
+
+#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
+
+// Fake std::nothrow_t to avoid including <new>.
+namespace std {
+struct nothrow_t {};
+enum class align_val_t: size_t {};
+}  // namespace std
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
+#define OPERATOR_NEW_BODY_ALIGN(Type, Align, NoThrow)              \
+  void *Ptr = scudoAllocate(size, static_cast<uptr>(Align), Type); \
+  if (!NoThrow && UNLIKELY(!Ptr)) reportOutOfMemory(size);         \
+  return Ptr;
+#define OPERATOR_NEW_BODY(Type, NoThrow) \
+  OPERATOR_NEW_BODY_ALIGN(Type, 0, NoThrow)
+
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size)
+{ OPERATOR_NEW_BODY(FromNew, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size)
+{ OPERATOR_NEW_BODY(FromNewArray, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FromNew, /*NoThrow=*/true); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY(FromNewArray, /*NoThrow=*/true); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(FromNew, align, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align)
+{ OPERATOR_NEW_BODY_ALIGN(FromNewArray, align, /*NoThrow=*/false); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(FromNew, align, /*NoThrow=*/true); }
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_NEW_BODY_ALIGN(FromNewArray, align, /*NoThrow=*/true); }
+
+#define OPERATOR_DELETE_BODY(Type) \
+  scudoDeallocate(ptr, 0, 0, Type);
+#define OPERATOR_DELETE_BODY_SIZE(Type) \
+  scudoDeallocate(ptr, size, 0, Type);
+#define OPERATOR_DELETE_BODY_ALIGN(Type) \
+  scudoDeallocate(ptr, 0, static_cast<uptr>(align), Type);
+#define OPERATOR_DELETE_BODY_SIZE_ALIGN(Type) \
+  scudoDeallocate(ptr, size, static_cast<uptr>(align), Type);
+
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT
+{ OPERATOR_DELETE_BODY(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT
+{ OPERATOR_DELETE_BODY(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_ALIGN(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_ALIGN(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY_ALIGN(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align, std::nothrow_t const&)
+{ OPERATOR_DELETE_BODY_ALIGN(FromNewArray); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FromNew); }
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size, std::align_val_t align) NOEXCEPT
+{ OPERATOR_DELETE_BODY_SIZE_ALIGN(FromNewArray); }
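
The sized operator delete variants above pass the compiler-supplied size
through to scudoDeallocate, which is what backs the DeleteSizeMismatch flag.
A sketch of how the sizes can disagree, assuming C++14 sized deallocation is
enabled (this is undefined behavior in standard C++ regardless):

    struct Base { int X; };               // no virtual destructor
    struct Derived : Base { int Y[8]; };  // larger than Base

    int main() {
      Base *P = new Derived;  // chunk recorded with sizeof(Derived)
      // The compiler may emit operator delete(P, sizeof(Base)); with
      // DeleteSizeMismatch=1 the runtime can flag the disagreement.
      delete P;
      return 0;
    }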

diff  --git a/compiler-rt/lib/scudo/scudo_platform.h b/compiler-rt/lib/scudo/scudo_platform.h
new file mode 100644
index 0000000000000..07d4b70fc8e9b
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_platform.h
@@ -0,0 +1,93 @@
+//===-- scudo_platform.h ----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo platform specific definitions.
+/// TODO(kostyak): add tests for the compile time defines.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_PLATFORM_H_
+#define SCUDO_PLATFORM_H_
+
+#include "sanitizer_common/sanitizer_allocator.h"
+
+#if !SANITIZER_LINUX && !SANITIZER_FUCHSIA
+# error "The Scudo hardened allocator is not supported on this platform."
+#endif
+
+#define SCUDO_TSD_EXCLUSIVE_SUPPORTED (!SANITIZER_ANDROID && !SANITIZER_FUCHSIA)
+
+#ifndef SCUDO_TSD_EXCLUSIVE
+// SCUDO_TSD_EXCLUSIVE wasn't defined, use a default TSD model for the platform.
+# if SANITIZER_ANDROID || SANITIZER_FUCHSIA
+// Android and Fuchsia use a pool of TSDs shared between threads.
+#  define SCUDO_TSD_EXCLUSIVE 0
+# elif SANITIZER_LINUX && !SANITIZER_ANDROID
+// Non-Android Linux uses an exclusive TSD per thread.
+#  define SCUDO_TSD_EXCLUSIVE 1
+# else
+#  error "No default TSD model defined for this platform."
+# endif  // SANITIZER_ANDROID || SANITIZER_FUCHSIA
+#endif  // SCUDO_TSD_EXCLUSIVE
+
+// If the exclusive TSD model is chosen, make sure the platform supports it.
+#if SCUDO_TSD_EXCLUSIVE && !SCUDO_TSD_EXCLUSIVE_SUPPORTED
+# error "The exclusive TSD model is not supported on this platform."
+#endif
+
+// Maximum number of TSDs that can be created for the Shared model.
+#ifndef SCUDO_SHARED_TSD_POOL_SIZE
+# if SANITIZER_ANDROID
+#  define SCUDO_SHARED_TSD_POOL_SIZE 2U
+# else
+#  define SCUDO_SHARED_TSD_POOL_SIZE 32U
+# endif  // SANITIZER_ANDROID
+#endif  // SCUDO_SHARED_TSD_POOL_SIZE
+
+// The following allows the public interface functions to be disabled.
+#ifndef SCUDO_CAN_USE_PUBLIC_INTERFACE
+# define SCUDO_CAN_USE_PUBLIC_INTERFACE 1
+#endif
+
+// Hooks in the allocation & deallocation paths can become a security concern if
+// implemented improperly, or if overwritten by an attacker. Use with caution.
+#ifndef SCUDO_CAN_USE_HOOKS
+# if SANITIZER_FUCHSIA
+#  define SCUDO_CAN_USE_HOOKS 1
+# else
+#  define SCUDO_CAN_USE_HOOKS 0
+# endif  // SANITIZER_FUCHSIA
+#endif  // SCUDO_CAN_USE_HOOKS
+
+namespace __scudo {
+
+#if SANITIZER_CAN_USE_ALLOCATOR64
+# if defined(__aarch64__) && SANITIZER_ANDROID
+const uptr AllocatorSize = 0x4000000000ULL;  // 256G.
+# elif defined(__aarch64__)
+const uptr AllocatorSize = 0x10000000000ULL;  // 1T.
+# else
+const uptr AllocatorSize = 0x40000000000ULL;  // 4T.
+# endif
+#else
+const uptr RegionSizeLog = SANITIZER_ANDROID ? 19 : 20;
+#endif  // SANITIZER_CAN_USE_ALLOCATOR64
+
+#if !defined(SCUDO_SIZE_CLASS_MAP)
+# define SCUDO_SIZE_CLASS_MAP Dense
+#endif
+
+#define SIZE_CLASS_MAP_TYPE SIZE_CLASS_MAP_TYPE_(SCUDO_SIZE_CLASS_MAP)
+#define SIZE_CLASS_MAP_TYPE_(T) SIZE_CLASS_MAP_TYPE__(T)
+#define SIZE_CLASS_MAP_TYPE__(T) T##SizeClassMap
+
+typedef SIZE_CLASS_MAP_TYPE SizeClassMap;
+
+}  // namespace __scudo
+
+#endif // SCUDO_PLATFORM_H_
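
The two-step indirection in the SIZE_CLASS_MAP_TYPE macros above exists so
that SCUDO_SIZE_CLASS_MAP is macro-expanded before ## pastes the tokens. With
the default value the chain resolves as:

    // SIZE_CLASS_MAP_TYPE
    //   -> SIZE_CLASS_MAP_TYPE_(SCUDO_SIZE_CLASS_MAP)
    //   -> SIZE_CLASS_MAP_TYPE__(Dense)   // argument expanded first
    //   -> Dense##SizeClassMap
    //   -> DenseSizeClassMap
    // so with the default the typedef above is equivalent to:
    //   typedef DenseSizeClassMap SizeClassMap;
    // Pasting SCUDO_SIZE_CLASS_MAP directly would instead produce the
    // literal token SCUDO_SIZE_CLASS_MAPSizeClassMap, since ## suppresses
    // expansion of its operands.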

diff  --git a/compiler-rt/lib/scudo/scudo_termination.cpp b/compiler-rt/lib/scudo/scudo_termination.cpp
new file mode 100644
index 0000000000000..5f1337efaca0d
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_termination.cpp
@@ -0,0 +1,41 @@
+//===-- scudo_termination.cpp -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file contains bare-bones termination functions to replace the
+/// __sanitizer ones, in order to avoid any potential abuse of the callback
+/// functionality.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __sanitizer {
+
+bool AddDieCallback(DieCallbackType Callback) { return true; }
+
+bool RemoveDieCallback(DieCallbackType Callback) { return true; }
+
+void SetUserDieCallback(DieCallbackType Callback) {}
+
+void NORETURN Die() {
+  if (common_flags()->abort_on_error)
+    Abort();
+  internal__exit(common_flags()->exitcode);
+}
+
+void SetCheckUnwindCallback(void (*callback)()) {}
+
+void NORETURN CheckFailed(const char *File, int Line, const char *Condition,
+                          u64 Value1, u64 Value2) {
+  __scudo::dieWithMessage("CHECK failed at %s:%d %s (%lld, %lld)\n",
+                          File, Line, Condition, Value1, Value2);
+}
+
+}  // namespace __sanitizer

diff  --git a/compiler-rt/lib/scudo/scudo_tsd.h b/compiler-rt/lib/scudo/scudo_tsd.h
new file mode 100644
index 0000000000000..eef4a7ba1e656
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_tsd.h
@@ -0,0 +1,65 @@
+//===-- scudo_tsd.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo thread specific data definition.
+/// Implementation will differ based on the thread local storage primitives
+/// offered by the underlying platform.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_H_
+#define SCUDO_TSD_H_
+
+#include "scudo_allocator.h"
+#include "scudo_utils.h"
+
+#include <pthread.h>
+
+namespace __scudo {
+
+struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) ScudoTSD {
+  AllocatorCacheT Cache;
+  uptr QuarantineCachePlaceHolder[4];
+
+  void init();
+  void commitBack();
+
+  inline bool tryLock() SANITIZER_TRY_ACQUIRE(true, Mutex) {
+    if (Mutex.TryLock()) {
+      atomic_store_relaxed(&Precedence, 0);
+      return true;
+    }
+    if (atomic_load_relaxed(&Precedence) == 0)
+      atomic_store_relaxed(&Precedence, static_cast<uptr>(
+          MonotonicNanoTime() >> FIRST_32_SECOND_64(16, 0)));
+    return false;
+  }
+
+  inline void lock() SANITIZER_ACQUIRE(Mutex) {
+    atomic_store_relaxed(&Precedence, 0);
+    Mutex.Lock();
+  }
+
+  inline void unlock() SANITIZER_RELEASE(Mutex) { Mutex.Unlock(); }
+
+  inline uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
+
+ private:
+  StaticSpinMutex Mutex;
+  atomic_uintptr_t Precedence;
+};
+
+void initThread(bool MinimalInit);
+
+// TSD model specific fastpath functions definitions.
+#include "scudo_tsd_exclusive.inc"
+#include "scudo_tsd_shared.inc"
+
+}  // namespace __scudo
+
+#endif  // SCUDO_TSD_H_

diff  --git a/compiler-rt/lib/scudo/scudo_tsd_exclusive.cpp b/compiler-rt/lib/scudo/scudo_tsd_exclusive.cpp
new file mode 100644
index 0000000000000..a203a74bbcf8e
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_tsd_exclusive.cpp
@@ -0,0 +1,67 @@
+//===-- scudo_tsd_exclusive.cpp ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo exclusive TSD implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_tsd.h"
+
+#if SCUDO_TSD_EXCLUSIVE
+
+namespace __scudo {
+
+static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
+static pthread_key_t PThreadKey;
+
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL ThreadState ScudoThreadState = ThreadNotInitialized;
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL ScudoTSD TSD;
+
+// Fallback TSD for when the thread isn't initialized yet or is torn down. It
+// can be shared between multiple threads and as such must be locked.
+ScudoTSD FallbackTSD;
+
+static void teardownThread(void *Ptr) {
+  uptr I = reinterpret_cast<uptr>(Ptr);
+  // The glibc POSIX thread-local-storage deallocation routine calls the
+  // user-provided destructors in a loop of up to PTHREAD_DESTRUCTOR_ITERATIONS
+  // passes. We want to be called last since other destructors might call free
+  // and the like, so we keep re-registering ourselves until the final
+  // iteration, and only then drain the quarantine and swallow the cache.
+  if (I > 1) {
+    // If pthread_setspecific fails, we will go ahead with the teardown.
+    if (LIKELY(pthread_setspecific(PThreadKey,
+                                   reinterpret_cast<void *>(I - 1)) == 0))
+      return;
+  }
+  TSD.commitBack();
+  ScudoThreadState = ThreadTornDown;
+}
+
+
+static void initOnce() {
+  CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread), 0);
+  initScudo();
+  FallbackTSD.init();
+}
+
+void initThread(bool MinimalInit) {
+  CHECK_EQ(pthread_once(&GlobalInitialized, initOnce), 0);
+  if (UNLIKELY(MinimalInit))
+    return;
+  CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(
+      GetPthreadDestructorIterations())), 0);
+  TSD.init();
+  ScudoThreadState = ThreadInitialized;
+}
+
+}  // namespace __scudo
+
+#endif  // SCUDO_TSD_EXCLUSIVE
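
teardownThread above relies on a POSIX detail: if a key still holds a
non-NULL value after its destructor runs, pthread re-invokes the destructor
on the next pass, up to PTHREAD_DESTRUCTOR_ITERATIONS times. Re-setting the
value to I - 1 therefore postpones the real teardown until the last pass. A
standalone sketch of the same countdown (compile with -lpthread; names are
illustrative):

    #include <pthread.h>
    #include <cstdint>
    #include <cstdio>

    static pthread_key_t Key;

    static void destructor(void *Ptr) {
      uintptr_t I = reinterpret_cast<uintptr_t>(Ptr);
      if (I > 1) {
        // Re-arm: a non-NULL value makes pthread call us again on the next
        // destructor iteration, postponing the real cleanup.
        if (pthread_setspecific(Key, reinterpret_cast<void *>(I - 1)) == 0)
          return;
      }
      printf("real teardown on the final iteration\n");
    }

    static void *threadMain(void *) {
      // 4 <= PTHREAD_DESTRUCTOR_ITERATIONS (POSIX guarantees at least 4).
      pthread_setspecific(Key, reinterpret_cast<void *>(uintptr_t{4}));
      return nullptr;
    }

    int main() {
      pthread_key_create(&Key, destructor);
      pthread_t T;
      pthread_create(&T, nullptr, threadMain, nullptr);
      pthread_join(T, nullptr);
      return 0;
    }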

diff  --git a/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc b/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
new file mode 100644
index 0000000000000..dc4d982f2fa8b
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_tsd_exclusive.inc
@@ -0,0 +1,47 @@
+//===-- scudo_tsd_exclusive.inc ---------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo exclusive TSD fastpath functions implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_H_
+#error "This file must be included inside scudo_tsd.h."
+#endif // SCUDO_TSD_H_
+
+#if SCUDO_TSD_EXCLUSIVE
+
+enum ThreadState : u8 {
+  ThreadNotInitialized = 0,
+  ThreadInitialized,
+  ThreadTornDown,
+};
+__attribute__((
+    tls_model("initial-exec"))) extern THREADLOCAL ThreadState ScudoThreadState;
+__attribute__((tls_model("initial-exec"))) extern THREADLOCAL ScudoTSD TSD;
+
+extern ScudoTSD FallbackTSD;
+
+ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
+  if (LIKELY(ScudoThreadState != ThreadNotInitialized))
+    return;
+  initThread(MinimalInit);
+}
+
+ALWAYS_INLINE ScudoTSD *
+getTSDAndLock(bool *UnlockRequired) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+  if (UNLIKELY(ScudoThreadState != ThreadInitialized)) {
+    FallbackTSD.lock();
+    *UnlockRequired = true;
+    return &FallbackTSD;
+  }
+  *UnlockRequired = false;
+  return &TSD;
+}
+
+#endif // SCUDO_TSD_EXCLUSIVE

diff  --git a/compiler-rt/lib/scudo/scudo_tsd_shared.cpp b/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
new file mode 100644
index 0000000000000..fc691b21a2135
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_tsd_shared.cpp
@@ -0,0 +1,107 @@
+//===-- scudo_tsd_shared.cpp ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo shared TSD implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_tsd.h"
+
+#if !SCUDO_TSD_EXCLUSIVE
+
+namespace __scudo {
+
+static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
+pthread_key_t PThreadKey;
+
+static atomic_uint32_t CurrentIndex;
+static ScudoTSD *TSDs;
+static u32 NumberOfTSDs;
+static u32 CoPrimes[SCUDO_SHARED_TSD_POOL_SIZE];
+static u32 NumberOfCoPrimes = 0;
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+__attribute__((tls_model("initial-exec")))
+THREADLOCAL ScudoTSD *CurrentTSD;
+#endif
+
+static void initOnce() {
+  CHECK_EQ(pthread_key_create(&PThreadKey, NULL), 0);
+  initScudo();
+  NumberOfTSDs = Min(Max(1U, GetNumberOfCPUsCached()),
+                     static_cast<u32>(SCUDO_SHARED_TSD_POOL_SIZE));
+  TSDs = reinterpret_cast<ScudoTSD *>(
+      MmapOrDie(sizeof(ScudoTSD) * NumberOfTSDs, "ScudoTSDs"));
+  for (u32 I = 0; I < NumberOfTSDs; I++) {
+    TSDs[I].init();
+    u32 A = I + 1;
+    u32 B = NumberOfTSDs;
+    while (B != 0) { const u32 T = A; A = B; B = T % B; }
+    if (A == 1)
+      CoPrimes[NumberOfCoPrimes++] = I + 1;
+  }
+}
+
+ALWAYS_INLINE void setCurrentTSD(ScudoTSD *TSD) {
+#if SANITIZER_ANDROID
+  *get_android_tls_ptr() = reinterpret_cast<uptr>(TSD);
+#elif SANITIZER_LINUX
+  CurrentTSD = TSD;
+#else
+  CHECK_EQ(pthread_setspecific(PThreadKey, reinterpret_cast<void *>(TSD)), 0);
+#endif  // SANITIZER_ANDROID
+}
+
+void initThread(bool MinimalInit) {
+  pthread_once(&GlobalInitialized, initOnce);
+  // Initial context assignment is done in a plain round-robin fashion.
+  u32 Index = atomic_fetch_add(&CurrentIndex, 1, memory_order_relaxed);
+  setCurrentTSD(&TSDs[Index % NumberOfTSDs]);
+}
+
+ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+  if (NumberOfTSDs > 1) {
+    // Use the Precedence of the current TSD as our random seed. Since we are in
+    // the slow path, it means that tryLock failed, and as a result it's very
+    // likely that said Precedence is non-zero.
+    u32 RandState = static_cast<u32>(TSD->getPrecedence());
+    const u32 R = Rand(&RandState);
+    const u32 Inc = CoPrimes[R % NumberOfCoPrimes];
+    u32 Index = R % NumberOfTSDs;
+    uptr LowestPrecedence = UINTPTR_MAX;
+    ScudoTSD *CandidateTSD = nullptr;
+    // Go randomly through at most 4 contexts and find a candidate.
+    for (u32 I = 0; I < Min(4U, NumberOfTSDs); I++) {
+      if (TSDs[Index].tryLock()) {
+        setCurrentTSD(&TSDs[Index]);
+        return &TSDs[Index];
+      }
+      const uptr Precedence = TSDs[Index].getPrecedence();
+      // A 0 precedence here means another thread just locked this TSD.
+      if (Precedence && Precedence < LowestPrecedence) {
+        CandidateTSD = &TSDs[Index];
+        LowestPrecedence = Precedence;
+      }
+      Index += Inc;
+      if (Index >= NumberOfTSDs)
+        Index -= NumberOfTSDs;
+    }
+    if (CandidateTSD) {
+      CandidateTSD->lock();
+      setCurrentTSD(CandidateTSD);
+      return CandidateTSD;
+    }
+  }
+  // Last resort, stick with the current one.
+  TSD->lock();
+  return TSD;
+}
+
+}  // namespace __scudo
+
+#endif  // !SCUDO_TSD_EXCLUSIVE
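
The inner while loop in initOnce above is Euclid's GCD: CoPrimes collects
every increment in [1, NumberOfTSDs] that is coprime with the pool size.
getTSDAndLockSlow then steps through the pool by such an increment, so the
probe sequence is a permutation of the indices and never revisits a TSD
within a cycle. A quick standalone demonstration of the stride property:

    #include <cstdio>

    int main() {
      const unsigned N = 6;    // pool size
      const unsigned Inc = 5;  // coprime with 6, as CoPrimes would contain
      unsigned Index = 2;      // arbitrary starting point
      // One full cycle touches every index 0..N-1 exactly once.
      for (unsigned I = 0; I < N; I++) {
        printf("%u ", Index);
        Index += Inc;
        if (Index >= N)
          Index -= N;
      }
      printf("\n");  // prints a permutation of 0 1 2 3 4 5
      return 0;
    }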

diff  --git a/compiler-rt/lib/scudo/scudo_tsd_shared.inc b/compiler-rt/lib/scudo/scudo_tsd_shared.inc
new file mode 100644
index 0000000000000..b25392a9630ef
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_tsd_shared.inc
@@ -0,0 +1,56 @@
+//===-- scudo_tsd_shared.inc ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo shared TSD fastpath functions implementation.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TSD_H_
+# error "This file must be included inside scudo_tsd.h."
+#endif  // SCUDO_TSD_H_
+
+#if !SCUDO_TSD_EXCLUSIVE
+
+extern pthread_key_t PThreadKey;
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID
+__attribute__((tls_model("initial-exec")))
+extern THREADLOCAL ScudoTSD *CurrentTSD;
+#endif
+
+ALWAYS_INLINE ScudoTSD* getCurrentTSD() {
+#if SANITIZER_ANDROID
+  return reinterpret_cast<ScudoTSD *>(*get_android_tls_ptr());
+#elif SANITIZER_LINUX
+  return CurrentTSD;
+#else
+  return reinterpret_cast<ScudoTSD *>(pthread_getspecific(PThreadKey));
+#endif  // SANITIZER_ANDROID
+}
+
+ALWAYS_INLINE void initThreadMaybe(bool MinimalInit = false) {
+  if (LIKELY(getCurrentTSD()))
+    return;
+  initThread(MinimalInit);
+}
+
+ScudoTSD *getTSDAndLockSlow(ScudoTSD *TSD);
+
+ALWAYS_INLINE ScudoTSD *
+getTSDAndLock(bool *UnlockRequired) SANITIZER_NO_THREAD_SAFETY_ANALYSIS {
+  ScudoTSD *TSD = getCurrentTSD();
+  DCHECK(TSD && "No TSD associated with the current thread!");
+  *UnlockRequired = true;
+  // Try to lock the currently associated context.
+  if (TSD->tryLock())
+    return TSD;
+  // If it failed, go the slow path.
+  return getTSDAndLockSlow(TSD);
+}
+
+#endif  // !SCUDO_TSD_EXCLUSIVE

diff  --git a/compiler-rt/lib/scudo/scudo_utils.cpp b/compiler-rt/lib/scudo/scudo_utils.cpp
new file mode 100644
index 0000000000000..b0aef752c6793
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_utils.cpp
@@ -0,0 +1,145 @@
+//===-- scudo_utils.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Platform specific utility functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_utils.h"
+
+#if defined(__x86_64__) || defined(__i386__)
+# include <cpuid.h>
+#elif defined(__arm__) || defined(__aarch64__)
+# include "sanitizer_common/sanitizer_getauxval.h"
+# if SANITIZER_FUCHSIA
+#  include <zircon/syscalls.h>
+#  include <zircon/features.h>
+# elif SANITIZER_POSIX
+#  include "sanitizer_common/sanitizer_posix.h"
+#  include <fcntl.h>
+# endif
+#endif
+
+#include <stdarg.h>
+
+// TODO(kostyak): remove __sanitizer *Printf uses in favor of our own less
+//                complicated string formatting code. The following is a
+//                temporary workaround to be able to use __sanitizer::VSNPrintf.
+namespace __sanitizer {
+
+extern int VSNPrintf(char *buff, int buff_length, const char *format,
+                     va_list args);
+
+}  // namespace __sanitizer
+
+namespace __scudo {
+
+void dieWithMessage(const char *Format, ...) {
+  static const char ScudoError[] = "Scudo ERROR: ";
+  static constexpr uptr PrefixSize = sizeof(ScudoError) - 1;
+  // Our messages are tiny, 256 characters is more than enough.
+  char Message[256];
+  va_list Args;
+  va_start(Args, Format);
+  internal_memcpy(Message, ScudoError, PrefixSize);
+  VSNPrintf(Message + PrefixSize, sizeof(Message) - PrefixSize, Format, Args);
+  va_end(Args);
+  LogMessageOnPrintf(Message);
+  if (common_flags()->abort_on_error)
+    SetAbortMessage(Message);
+  RawWrite(Message);
+  Die();
+}
+
+#if defined(__x86_64__) || defined(__i386__)
+// i386 and x86_64 specific code to detect CRC32 hardware support via CPUID.
+// CRC32 requires the SSE 4.2 instruction set.
+# ifndef bit_SSE4_2
+#  define bit_SSE4_2 bit_SSE42  // clang and gcc have different defines.
+# endif
+
+#ifndef signature_HYGON_ebx // They are not defined in gcc.
+// HYGON: "HygonGenuine".
+#define signature_HYGON_ebx 0x6f677948
+#define signature_HYGON_edx 0x6e65476e
+#define signature_HYGON_ecx 0x656e6975
+#endif
+
+bool hasHardwareCRC32() {
+  u32 Eax, Ebx, Ecx, Edx;
+  __get_cpuid(0, &Eax, &Ebx, &Ecx, &Edx);
+  const bool IsIntel = (Ebx == signature_INTEL_ebx) &&
+                       (Edx == signature_INTEL_edx) &&
+                       (Ecx == signature_INTEL_ecx);
+  const bool IsAMD = (Ebx == signature_AMD_ebx) &&
+                     (Edx == signature_AMD_edx) &&
+                     (Ecx == signature_AMD_ecx);
+  const bool IsHygon = (Ebx == signature_HYGON_ebx) &&
+                       (Edx == signature_HYGON_edx) &&
+                       (Ecx == signature_HYGON_ecx);
+  if (!IsIntel && !IsAMD && !IsHygon)
+    return false;
+  __get_cpuid(1, &Eax, &Ebx, &Ecx, &Edx);
+  return !!(Ecx & bit_SSE4_2);
+}
+#elif defined(__arm__) || defined(__aarch64__)
+// For ARM and AArch64, hardware CRC32 support is indicated in the AT_HWCAP
+// auxiliary vector.
+# ifndef AT_HWCAP
+#  define AT_HWCAP 16
+# endif
+# ifndef HWCAP_CRC32
+#  define HWCAP_CRC32 (1 << 7)  // HWCAP_CRC32 is missing on older platforms.
+# endif
+# if SANITIZER_POSIX
+bool hasHardwareCRC32ARMPosix() {
+  uptr F = internal_open("/proc/self/auxv", O_RDONLY);
+  if (internal_iserror(F))
+    return false;
+  struct { uptr Tag; uptr Value; } Entry = { 0, 0 };
+  for (;;) {
+    uptr N = internal_read(F, &Entry, sizeof(Entry));
+    if (internal_iserror(N) || N != sizeof(Entry) ||
+        (Entry.Tag == 0 && Entry.Value == 0) || Entry.Tag == AT_HWCAP)
+      break;
+  }
+  internal_close(F);
+  return (Entry.Tag == AT_HWCAP && (Entry.Value & HWCAP_CRC32) != 0);
+}
+# else
+bool hasHardwareCRC32ARMPosix() { return false; }
+# endif  // SANITIZER_POSIX
+
+// Bionic doesn't initialize its globals early enough. This causes issues when
+// trying to access them from a preinit_array (b/25751302) or from another
+// constructor called before the libc one (b/68046352). __progname is
+// initialized after the other globals, so we can check its value to know if
+// calling getauxval is safe.
+extern "C" SANITIZER_WEAK_ATTRIBUTE char *__progname;
+inline bool areBionicGlobalsInitialized() {
+  return !SANITIZER_ANDROID || (&__progname && __progname);
+}
+
+bool hasHardwareCRC32() {
+#if SANITIZER_FUCHSIA
+  u32 HWCap;
+  zx_status_t Status = zx_system_get_features(ZX_FEATURE_KIND_CPU, &HWCap);
+  if (Status != ZX_OK || (HWCap & ZX_ARM64_FEATURE_ISA_CRC32) == 0)
+    return false;
+  return true;
+#else
+  if (&getauxval && areBionicGlobalsInitialized())
+    return !!(getauxval(AT_HWCAP) & HWCAP_CRC32);
+  return hasHardwareCRC32ARMPosix();
+#endif  // SANITIZER_FUCHSIA
+}
+#else
+bool hasHardwareCRC32() { return false; }
+#endif  // defined(__x86_64__) || defined(__i386__)
+
+}  // namespace __scudo

diff  --git a/compiler-rt/lib/scudo/scudo_utils.h b/compiler-rt/lib/scudo/scudo_utils.h
new file mode 100644
index 0000000000000..5a9b32f0b234e
--- /dev/null
+++ b/compiler-rt/lib/scudo/scudo_utils.h
@@ -0,0 +1,36 @@
+//===-- scudo_utils.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_utils.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_UTILS_H_
+#define SCUDO_UTILS_H_
+
+#include "sanitizer_common/sanitizer_common.h"
+
+#include <string.h>
+
+namespace __scudo {
+
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+  static_assert(sizeof(Dest) == sizeof(Source), "Sizes are not equal!");
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+void dieWithMessage(const char *Format, ...) NORETURN FORMAT(1, 2);
+
+bool hasHardwareCRC32();
+
+}  // namespace __scudo
+
+#endif  // SCUDO_UTILS_H_


        

