[compiler-rt] r271968 - [sanitizer] Initial implementation of a Hardened Allocator

Kostya Serebryany via llvm-commits llvm-commits at lists.llvm.org
Mon Jun 6 18:20:27 PDT 2016


Author: kcc
Date: Mon Jun  6 20:20:26 2016
New Revision: 271968

URL: http://llvm.org/viewvc/llvm-project?rev=271968&view=rev
Log:
[sanitizer] Initial implementation of a Hardened Allocator

Summary:
This is an initial implementation of a Hardened Allocator based on Sanitizer Common's CombinedAllocator.
It aims to mitigate heap-based vulnerabilities by adding several features to the base allocator, while staying relatively fast.
The following were implemented:
- additional consistency checks on the allocation function parameters and on the heap chunks;
- use of a checksum-protected chunk header, to detect corruption;
- randomness added to the allocator base;
- a delayed freelist (quarantine), to mitigate use-after-free and reduce the overall determinism of the allocator.
Additional mitigations are in the works.
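
As a rough illustration of the class of defect these checks are meant to catch
(the double-free.cpp test added below exercises the same path), a double free
of a Scudo-served chunk trips the chunk state check instead of silently
corrupting the heap. This is only a sketch of the expected behavior, not part
of the patch:

  #include <stdlib.h>

  int main() {
    void *p = malloc(32);
    free(p);  // The chunk header state goes from ChunkAllocated to
              // ChunkQuarantine (the chunk sits in the delayed freelist).
    free(p);  // The state is no longer ChunkAllocated, so the process dies
              // with "ERROR: invalid chunk state when deallocating address".
    return 0;
  }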

Reviewers: eugenis, aizatsky, pcc, krasin, vitalybuka, glider, dvyukov, kcc

Subscribers: kubabrecka, filcab, llvm-commits

Differential Revision: http://reviews.llvm.org/D20084

Added:
    compiler-rt/trunk/lib/scudo/
    compiler-rt/trunk/lib/scudo/CMakeLists.txt
    compiler-rt/trunk/lib/scudo/scudo_allocator.cpp
    compiler-rt/trunk/lib/scudo/scudo_allocator.h
    compiler-rt/trunk/lib/scudo/scudo_flags.cpp
    compiler-rt/trunk/lib/scudo/scudo_flags.h
    compiler-rt/trunk/lib/scudo/scudo_flags.inc
    compiler-rt/trunk/lib/scudo/scudo_interceptors.cpp
    compiler-rt/trunk/lib/scudo/scudo_new_delete.cpp
    compiler-rt/trunk/lib/scudo/scudo_termination.cpp
    compiler-rt/trunk/lib/scudo/scudo_utils.cpp
    compiler-rt/trunk/lib/scudo/scudo_utils.h
    compiler-rt/trunk/test/scudo/
    compiler-rt/trunk/test/scudo/CMakeLists.txt
    compiler-rt/trunk/test/scudo/alignment.cpp
    compiler-rt/trunk/test/scudo/double-free.cpp
    compiler-rt/trunk/test/scudo/lit.cfg
    compiler-rt/trunk/test/scudo/lit.site.cfg.in
    compiler-rt/trunk/test/scudo/malloc.cpp
    compiler-rt/trunk/test/scudo/memalign.cpp
    compiler-rt/trunk/test/scudo/mismatch.cpp
    compiler-rt/trunk/test/scudo/overflow.cpp
    compiler-rt/trunk/test/scudo/preinit.cpp
    compiler-rt/trunk/test/scudo/quarantine.cpp
    compiler-rt/trunk/test/scudo/realloc.cpp
    compiler-rt/trunk/test/scudo/sized-delete.cpp
    compiler-rt/trunk/test/scudo/sizes.cpp
Modified:
    compiler-rt/trunk/cmake/config-ix.cmake
    compiler-rt/trunk/lib/CMakeLists.txt
    compiler-rt/trunk/test/CMakeLists.txt

Modified: compiler-rt/trunk/cmake/config-ix.cmake
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/cmake/config-ix.cmake?rev=271968&r1=271967&r2=271968&view=diff
==============================================================================
--- compiler-rt/trunk/cmake/config-ix.cmake (original)
+++ compiler-rt/trunk/cmake/config-ix.cmake Mon Jun  6 20:20:26 2016
@@ -158,6 +158,7 @@ set(ALL_UBSAN_SUPPORTED_ARCH ${X86} ${X8
 set(ALL_SAFESTACK_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${MIPS32} ${MIPS64})
 set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64})
 set(ALL_ESAN_SUPPORTED_ARCH ${X86_64})
+set(ALL_SCUDO_SUPPORTED_ARCH ${X86_64})
 
 if(APPLE)
   include(CompilerRTDarwinUtils)
@@ -344,6 +345,9 @@ if(APPLE)
   list_intersect(ESAN_SUPPORTED_ARCH
     ALL_ESAN_SUPPORTED_ARCH
     SANITIZER_COMMON_SUPPORTED_ARCH)
+  list_intersect(SCUDO_SUPPORTED_ARCH
+    ALL_SCUDO_SUPPORTED_ARCH
+    SANITIZER_COMMON_SUPPORTED_ARCH)
 else()
   # Architectures supported by compiler-rt libraries.
   filter_available_targets(SANITIZER_COMMON_SUPPORTED_ARCH
@@ -365,6 +369,8 @@ else()
     ${ALL_SAFESTACK_SUPPORTED_ARCH})
   filter_available_targets(CFI_SUPPORTED_ARCH ${ALL_CFI_SUPPORTED_ARCH})
   filter_available_targets(ESAN_SUPPORTED_ARCH ${ALL_ESAN_SUPPORTED_ARCH})
+  filter_available_targets(SCUDO_SUPPORTED_ARCH
+    ${ALL_SCUDO_SUPPORTED_ARCH})
 endif()
 
 if (MSVC)
@@ -477,3 +483,11 @@ if (COMPILER_RT_HAS_SANITIZER_COMMON AND
 else()
   set(COMPILER_RT_HAS_ESAN FALSE)
 endif()
+
+if (COMPILER_RT_HAS_SANITIZER_COMMON AND SCUDO_SUPPORTED_ARCH AND
+    OS_NAME MATCHES "Linux")
+  set(COMPILER_RT_HAS_SCUDO TRUE)
+else()
+  set(COMPILER_RT_HAS_SCUDO FALSE)
+endif()
+

Modified: compiler-rt/trunk/lib/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/CMakeLists.txt?rev=271968&r1=271967&r2=271968&view=diff
==============================================================================
--- compiler-rt/trunk/lib/CMakeLists.txt (original)
+++ compiler-rt/trunk/lib/CMakeLists.txt Mon Jun  6 20:20:26 2016
@@ -52,4 +52,8 @@ if(COMPILER_RT_BUILD_SANITIZERS)
   if(COMPILER_RT_HAS_ESAN)
     add_subdirectory(esan)
   endif()
+
+  if(COMPILER_RT_HAS_SCUDO)
+    add_subdirectory(scudo)
+  endif()
 endif()

Added: compiler-rt/trunk/lib/scudo/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/CMakeLists.txt?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/lib/scudo/CMakeLists.txt (added)
+++ compiler-rt/trunk/lib/scudo/CMakeLists.txt Mon Jun  6 20:20:26 2016
@@ -0,0 +1,32 @@
+add_custom_target(scudo)
+
+include_directories(..)
+
+set(SCUDO_CFLAGS ${SANITIZER_COMMON_CFLAGS})
+append_rtti_flag(OFF SCUDO_CFLAGS)
+list(APPEND SCUDO_CFLAGS -msse4.2 -mcx16)
+
+set(SCUDO_SOURCES
+  scudo_allocator.cpp
+  scudo_flags.cpp
+  scudo_interceptors.cpp
+  scudo_new_delete.cpp
+  scudo_termination.cpp
+  scudo_utils.cpp)
+
+if(COMPILER_RT_HAS_SCUDO)
+  foreach(arch ${SCUDO_SUPPORTED_ARCH})
+    add_compiler_rt_runtime(clang_rt.scudo
+      STATIC
+      ARCHS ${arch}
+      SOURCES ${SCUDO_SOURCES}
+              $<TARGET_OBJECTS:RTInterception.${arch}>
+              $<TARGET_OBJECTS:RTSanitizerCommonNoTermination.${arch}>
+              $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
+      CFLAGS ${SCUDO_CFLAGS}
+      PARENT_TARGET scudo)
+  endforeach()
+endif()
+
+add_dependencies(compiler-rt scudo)
+

Added: compiler-rt/trunk/lib/scudo/scudo_allocator.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/scudo_allocator.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/lib/scudo/scudo_allocator.cpp (added)
+++ compiler-rt/trunk/lib/scudo/scudo_allocator.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,635 @@
+//===-- scudo_allocator.cpp -------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo Hardened Allocator implementation.
+/// It uses the sanitizer_common allocator as a base and aims at mitigating
+/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
+/// header, a delayed free list, and additional sanity checks.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_allocator.h"
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_quarantine.h"
+
+#include <limits.h>
+#include <pthread.h>
+#include <smmintrin.h>
+
+#include <atomic>
+#include <cstring>
+
+namespace __scudo {
+
+const uptr AllocatorSpace = ~0ULL;
+const uptr AllocatorSize  =  0x10000000000ULL;
+const uptr MinAlignmentLog = 4; // 16 bytes for x64
+const uptr MaxAlignmentLog = 24;
+
+typedef DefaultSizeClassMap SizeClassMap;
+typedef SizeClassAllocator64<AllocatorSpace, AllocatorSize, 0, SizeClassMap>
+  PrimaryAllocator;
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
+  ScudoAllocator;
+
+static ScudoAllocator &getAllocator();
+
+static thread_local Xorshift128Plus Prng;
+// Global static cookie, initialized at start-up.
+static u64 Cookie;
+
+enum ChunkState : u8 {
+  ChunkAvailable  = 0,
+  ChunkAllocated  = 1,
+  ChunkQuarantine = 2
+};
+
+typedef unsigned __int128 PackedHeader;
+typedef std::atomic<PackedHeader> AtomicPackedHeader;
+
+// Our header requires 128 bits of storage on x64 (the only platform
+// supported as of now), which fits nicely with the alignment requirements.
+// Having the offset saves us from using functions such as GetBlockBegin,
+// which is fairly costly. Our first implementation used the MetaData as
+// well, offering the advantage of being stored away from the chunk itself,
+// but accessing it was costly as well.
+// The header will be atomically loaded and stored using the 16-byte primitives
+// offered by the platform (likely requires cmpxchg16b support).
+struct UnpackedHeader {
+  // 1st 8 bytes
+  u16 Checksum      : 16;
+  u64 RequestedSize : 40; // Needed for reallocation purposes.
+  u8  State         : 2;  // available, allocated, or quarantined
+  u8  AllocType     : 2;  // malloc, new, new[], or memalign
+  u8  Unused_0_     : 4;
+  // 2nd 8 bytes
+  u64 Offset        : 20; // Offset from the beginning of the backend
+                          // allocation to the beginning of the chunk itself,
+                          // in multiples of MinAlignment. See comment about
+                          // its maximum value and the check in init().
+  u64 Unused_1_     : 28;
+  u16 Salt          : 16;
+};
+
+COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
+
+const uptr ChunkHeaderSize = sizeof(PackedHeader);
+
+struct ScudoChunk : UnpackedHeader {
+  // We can't use the offset member of the chunk itself, as we would double
+  // fetch it without any guarantee that it wouldn't have been tampered with.
+  // To prevent this, we work with a local copy of the header.
+  void *AllocBeg(UnpackedHeader *Header) {
+    return reinterpret_cast<void *>(
+        reinterpret_cast<uptr>(this) - (Header->Offset << MinAlignmentLog));
+  }
+
+  // CRC32 checksum of the Chunk pointer and its ChunkHeader.
+  // It currently uses the Intel Nehalem SSE4.2 crc32 64-bit instruction.
+  u16 Checksum(UnpackedHeader *Header) const {
+    u64 HeaderHolder[2];
+    memcpy(HeaderHolder, Header, sizeof(HeaderHolder));
+    u64 Crc = _mm_crc32_u64(Cookie, reinterpret_cast<uptr>(this));
+    // This is somewhat of a shortcut. The checksum is stored in the 16 least
+    // significant bits of the first 8 bytes of the header, hence zero-ing
+    // those bits out. It would be more valid to zero the checksum field of the
+    // UnpackedHeader, but would require holding an additional copy of it.
+    Crc = _mm_crc32_u64(Crc, HeaderHolder[0] & 0xffffffffffff0000ULL);
+    Crc = _mm_crc32_u64(Crc, HeaderHolder[1]);
+    return static_cast<u16>(Crc);
+  }
+
+  // Loads and unpacks the header, verifying the checksum in the process.
+  void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
+    const AtomicPackedHeader *AtomicHeader =
+        reinterpret_cast<const AtomicPackedHeader *>(this);
+    PackedHeader NewPackedHeader =
+        AtomicHeader->load(std::memory_order_relaxed);
+    *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
+    if ((NewUnpackedHeader->Unused_0_ != 0) ||
+        (NewUnpackedHeader->Unused_1_ != 0) ||
+        (NewUnpackedHeader->Checksum != Checksum(NewUnpackedHeader))) {
+      dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
+    }
+  }
+
+  // Packs and stores the header, computing the checksum in the process.
+  void storeHeader(UnpackedHeader *NewUnpackedHeader) {
+    NewUnpackedHeader->Checksum = Checksum(NewUnpackedHeader);
+    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+    AtomicPackedHeader *AtomicHeader =
+        reinterpret_cast<AtomicPackedHeader *>(this);
+    AtomicHeader->store(NewPackedHeader, std::memory_order_relaxed);
+  }
+
+  // Packs and stores the header, computing the checksum in the process. We
+  // compare the current header with the expected provided one to ensure that
+  // we are not being raced by a corruption occurring in another thread.
+  void compareExchangeHeader(UnpackedHeader *NewUnpackedHeader,
+                             UnpackedHeader *OldUnpackedHeader) {
+    NewUnpackedHeader->Checksum = Checksum(NewUnpackedHeader);
+    PackedHeader NewPackedHeader = bit_cast<PackedHeader>(*NewUnpackedHeader);
+    PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
+    AtomicPackedHeader *AtomicHeader =
+        reinterpret_cast<AtomicPackedHeader *>(this);
+    if (!AtomicHeader->compare_exchange_strong(OldPackedHeader,
+                                               NewPackedHeader,
+                                               std::memory_order_relaxed,
+                                               std::memory_order_relaxed)) {
+      dieWithMessage("ERROR: race on chunk header at address %p\n", this);
+    }
+  }
+};
+
+static bool ScudoInitIsRunning = false;
+
+static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
+static pthread_key_t pkey;
+
+static thread_local bool ThreadInited = false;
+static thread_local bool ThreadTornDown = false;
+static thread_local AllocatorCache Cache;
+
+static void teardownThread(void *p) {
+  uptr v = reinterpret_cast<uptr>(p);
+  // The glibc POSIX thread-local-storage deallocation routine calls
+  // user-provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
+  // We want to be called last since other destructors might call free and the
+  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
+  // quarantine and destroying the cache.
+  if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
+    pthread_setspecific(pkey, reinterpret_cast<void *>(v + 1));
+    return;
+  }
+  drainQuarantine();
+  getAllocator().DestroyCache(&Cache);
+  ThreadTornDown = true;
+}
+
+static void initInternal() {
+  SanitizerToolName = "Scudo";
+  CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
+  ScudoInitIsRunning = true;
+
+  initFlags();
+
+  AllocatorOptions Options;
+  Options.setFrom(getFlags(), common_flags());
+  initAllocator(Options);
+
+  ScudoInitIsRunning = false;
+}
+
+static void initGlobal() {
+  pthread_key_create(&pkey, teardownThread);
+  initInternal();
+}
+
+static void NOINLINE initThread() {
+  pthread_once(&GlobalInited, initGlobal);
+  pthread_setspecific(pkey, reinterpret_cast<void *>(1));
+  getAllocator().InitCache(&Cache);
+  ThreadInited = true;
+}
+
+struct QuarantineCallback {
+  explicit QuarantineCallback(AllocatorCache *Cache)
+    : Cache_(Cache) {}
+
+  // Chunk recycling function, returns a quarantined chunk to the backend.
+  void Recycle(ScudoChunk *Chunk) {
+    UnpackedHeader Header;
+    Chunk->loadHeader(&Header);
+    if (Header.State != ChunkQuarantine) {
+      dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
+                     Chunk);
+    }
+    void *Ptr = Chunk->AllocBeg(&Header);
+    getAllocator().Deallocate(Cache_, Ptr);
+  }
+
+  /// Internal quarantine allocation and deallocation functions.
+  void *Allocate(uptr Size) {
+    // The internal quarantine memory cannot be protected by us. But the only
+    // structures allocated are QuarantineBatch, which are 8KB on x64. So we
+    // would use mmap for those, and given that Deallocate doesn't pass a size
+    // in, we would enforce the allocation size to be sizeof(QuarantineBatch).
+    // TODO(kostyak): switching to mmap greatly impacts performance, we have
+    //                to find another solution.
+    // CHECK_EQ(Size, sizeof(QuarantineBatch));
+    // return MmapOrDie(Size, "QuarantineBatch");
+    return getAllocator().Allocate(Cache_, Size, 1, false);
+  }
+
+  void Deallocate(void *Ptr) {
+    // UnmapOrDie(Ptr, sizeof(QuarantineBatch));
+    getAllocator().Deallocate(Cache_, Ptr);
+  }
+
+  AllocatorCache *Cache_;
+};
+
+typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
+typedef ScudoQuarantine::Cache QuarantineCache;
+static thread_local QuarantineCache ThreadQuarantineCache;
+
+void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
+  MayReturnNull = cf->allocator_may_return_null;
+  QuarantineSizeMb = f->QuarantineSizeMb;
+  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
+  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
+  DeleteSizeMismatch = f->DeleteSizeMismatch;
+  ZeroContents = f->ZeroContents;
+}
+
+void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
+  cf->allocator_may_return_null = MayReturnNull;
+  f->QuarantineSizeMb = QuarantineSizeMb;
+  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
+  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
+  f->DeleteSizeMismatch = DeleteSizeMismatch;
+  f->ZeroContents = ZeroContents;
+}
+
+struct Allocator {
+  static const uptr MaxAllowedMallocSize = 1ULL << 40;
+  static const uptr MinAlignment = 1 << MinAlignmentLog;
+  static const uptr MaxAlignment = 1 << MaxAlignmentLog; // 16 MB
+
+  ScudoAllocator BackendAllocator;
+  ScudoQuarantine AllocatorQuarantine;
+
+  // The fallback caches are used when the thread local caches have been
+  // 'destroyed' on thread tear-down. They are protected by a Mutex as they
+  // can be accessed by different threads.
+  StaticSpinMutex FallbackMutex;
+  AllocatorCache FallbackAllocatorCache;
+  QuarantineCache FallbackQuarantineCache;
+
+  bool DeallocationTypeMismatch;
+  bool ZeroContents;
+  bool DeleteSizeMismatch;
+
+  explicit Allocator(LinkerInitialized)
+    : AllocatorQuarantine(LINKER_INITIALIZED),
+      FallbackQuarantineCache(LINKER_INITIALIZED) {}
+
+  void init(const AllocatorOptions &Options) {
+    // Currently SSE 4.2 support is required. This might change later.
+    CHECK(testCPUFeature(SSE4_2)); // for crc32
+
+    // Verify that the header offset field can hold the maximum offset. In the
+    // worst case scenario, the backend allocation is already aligned on
+    // MaxAlignment, so in order to store the header and still be aligned, we
+    // add an extra MaxAlignment. As a result, the offset from the beginning of
+    // the backend allocation to the chunk will be MaxAlignment -
+    // ChunkHeaderSize.
+    UnpackedHeader Header = {};
+    uptr MaximumOffset = (MaxAlignment - ChunkHeaderSize) >> MinAlignmentLog;
+    Header.Offset = MaximumOffset;
+    if (Header.Offset != MaximumOffset) {
+      dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
+                     "header\n");
+    }
+
+    DeallocationTypeMismatch = Options.DeallocationTypeMismatch;
+    DeleteSizeMismatch = Options.DeleteSizeMismatch;
+    ZeroContents = Options.ZeroContents;
+    BackendAllocator.Init(Options.MayReturnNull);
+    AllocatorQuarantine.Init(static_cast<uptr>(Options.QuarantineSizeMb) << 20,
+                             static_cast<uptr>(
+                                 Options.ThreadLocalQuarantineSizeKb) << 10);
+    BackendAllocator.InitCache(&FallbackAllocatorCache);
+    Cookie = Prng.Next();
+  }
+
+  // Allocates a chunk.
+  void *allocate(uptr Size, uptr Alignment, AllocType Type) {
+    if (UNLIKELY(!ThreadInited))
+      initThread();
+    if (!IsPowerOfTwo(Alignment)) {
+      dieWithMessage("ERROR: malloc alignment is not a power of 2\n");
+    }
+    if (Alignment > MaxAlignment)
+      return BackendAllocator.ReturnNullOrDie();
+    if (Alignment < MinAlignment)
+      Alignment = MinAlignment;
+    if (Size == 0)
+      Size = 1;
+    if (Size >= MaxAllowedMallocSize)
+      return BackendAllocator.ReturnNullOrDie();
+    uptr RoundedSize = RoundUpTo(Size, MinAlignment);
+    uptr ExtraBytes = ChunkHeaderSize;
+    if (Alignment > MinAlignment)
+      ExtraBytes += Alignment;
+    uptr NeededSize = RoundedSize + ExtraBytes;
+    if (NeededSize >= MaxAllowedMallocSize)
+      return BackendAllocator.ReturnNullOrDie();
+
+    void *Ptr;
+    if (LIKELY(!ThreadTornDown)) {
+      Ptr = BackendAllocator.Allocate(&Cache, NeededSize, MinAlignment);
+    } else {
+      SpinMutexLock l(&FallbackMutex);
+      Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
+                               MinAlignment);
+    }
+    if (!Ptr)
+      return BackendAllocator.ReturnNullOrDie();
+
+    // If requested, we will zero out the entire contents of the returned chunk.
+    if (ZeroContents && BackendAllocator.FromPrimary(Ptr))
+       memset(Ptr, 0, BackendAllocator.GetActuallyAllocatedSize(Ptr));
+
+    uptr AllocBeg = reinterpret_cast<uptr>(Ptr);
+    uptr ChunkBeg = AllocBeg + ChunkHeaderSize;
+    if (!IsAligned(ChunkBeg, Alignment))
+      ChunkBeg = RoundUpTo(ChunkBeg, Alignment);
+    CHECK_LE(ChunkBeg + Size, AllocBeg + NeededSize);
+    ScudoChunk *Chunk =
+        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
+    UnpackedHeader Header = {};
+    Header.State = ChunkAllocated;
+    Header.Offset = (ChunkBeg - ChunkHeaderSize - AllocBeg) >> MinAlignmentLog;
+    Header.AllocType = Type;
+    Header.RequestedSize = Size;
+    Header.Salt = static_cast<u16>(Prng.Next());
+    Chunk->storeHeader(&Header);
+    void *UserPtr = reinterpret_cast<void *>(ChunkBeg);
+    // TODO(kostyak): hooks sound like a terrible idea security wise but might
+    //                be needed for things to work properly?
+    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
+    return UserPtr;
+  }
+
+  // Deallocates a Chunk, which means adding it to the delayed free list (or
+  // Quarantine).
+  void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
+    if (UNLIKELY(!ThreadInited))
+      initThread();
+    // TODO(kostyak): see hook comment above
+    // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
+    if (!UserPtr)
+      return;
+    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
+    if (!IsAligned(ChunkBeg, MinAlignment)) {
+      dieWithMessage("ERROR: attempted to deallocate a chunk not properly "
+                     "aligned at address %p\n", UserPtr);
+    }
+    ScudoChunk *Chunk =
+        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
+    UnpackedHeader OldHeader;
+    Chunk->loadHeader(&OldHeader);
+    if (OldHeader.State != ChunkAllocated) {
+      dieWithMessage("ERROR: invalid chunk state when deallocating address "
+                     "%p\n", Chunk);
+    }
+    UnpackedHeader NewHeader = OldHeader;
+    NewHeader.State = ChunkQuarantine;
+    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
+    if (DeallocationTypeMismatch) {
+      // The deallocation type has to match the allocation one.
+      if (NewHeader.AllocType != Type) {
+        // With the exception of memalign'd Chunks, which can still be free'd.
+        if (NewHeader.AllocType != FromMemalign || Type != FromMalloc) {
+          dieWithMessage("ERROR: allocation type mismatch on address %p\n",
+                         Chunk);
+        }
+      }
+    }
+    uptr Size = NewHeader.RequestedSize;
+    if (DeleteSizeMismatch) {
+      if (DeleteSize && DeleteSize != Size) {
+        dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
+                       Chunk);
+      }
+    }
+    if (LIKELY(!ThreadTornDown)) {
+      AllocatorQuarantine.Put(&ThreadQuarantineCache,
+                              QuarantineCallback(&Cache), Chunk, Size);
+    } else {
+      SpinMutexLock l(&FallbackMutex);
+      AllocatorQuarantine.Put(&FallbackQuarantineCache,
+                              QuarantineCallback(&FallbackAllocatorCache),
+                              Chunk, Size);
+    }
+  }
+
+  // Returns the actual usable size of a chunk. Since this requires loading the
+  // header, we will return it in the second parameter, as it can be required
+  // by the caller to perform additional processing.
+  uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
+    if (UNLIKELY(!ThreadInited))
+      initThread();
+    if (!Ptr)
+      return 0;
+    uptr ChunkBeg = reinterpret_cast<uptr>(Ptr);
+    ScudoChunk *Chunk =
+        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
+    Chunk->loadHeader(Header);
+    // Getting the usable size of a chunk only makes sense if it's allocated.
+    if (Header->State != ChunkAllocated) {
+      dieWithMessage("ERROR: attempted to size a non-allocated chunk at "
+                     "address %p\n", Chunk);
+    }
+    uptr Size =
+        BackendAllocator.GetActuallyAllocatedSize(Chunk->AllocBeg(Header));
+    // UsableSize works as malloc_usable_size, which is also what (AFAIU)
+    // tcmalloc's MallocExtension::GetAllocatedSize aims at providing. This
+    // means we will return the size of the chunk from the beginning of the
+    // user data to the end of the 'user' allocation, hence we subtract the
+    // header size and the offset from the size.
+    if (Size == 0)
+      return Size;
+    return Size - ChunkHeaderSize - (Header->Offset << MinAlignmentLog);
+  }
+
+  // Helper function that doesn't care about the header.
+  uptr getUsableSize(const void *Ptr) {
+    UnpackedHeader Header;
+    return getUsableSize(Ptr, &Header);
+  }
+
+  // Reallocates a chunk. We can save on a new allocation if the new requested
+  // size still fits in the chunk.
+  void *reallocate(void *OldPtr, uptr NewSize) {
+    if (UNLIKELY(!ThreadInited))
+      initThread();
+    UnpackedHeader OldHeader;
+    uptr Size = getUsableSize(OldPtr, &OldHeader);
+    uptr ChunkBeg = reinterpret_cast<uptr>(OldPtr);
+    ScudoChunk *Chunk =
+        reinterpret_cast<ScudoChunk *>(ChunkBeg - ChunkHeaderSize);
+    if (OldHeader.AllocType != FromMalloc) {
+      dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
+                     Chunk);
+    }
+    UnpackedHeader NewHeader = OldHeader;
+    // The new size still fits in the current chunk.
+    if (NewSize <= Size) {
+      NewHeader.RequestedSize = NewSize;
+      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
+      return OldPtr;
+    }
+    // Otherwise, we have to allocate a new chunk and copy the contents of the
+    // old one.
+    void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
+    if (NewPtr) {
+      uptr OldSize = OldHeader.RequestedSize;
+      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
+      NewHeader.State = ChunkQuarantine;
+      Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
+      if (LIKELY(!ThreadTornDown)) {
+        AllocatorQuarantine.Put(&ThreadQuarantineCache,
+                                QuarantineCallback(&Cache), Chunk, OldSize);
+      } else {
+        SpinMutexLock l(&FallbackMutex);
+        AllocatorQuarantine.Put(&FallbackQuarantineCache,
+                                QuarantineCallback(&FallbackAllocatorCache),
+                                Chunk, OldSize);
+      }
+    }
+    return NewPtr;
+  }
+
+  void *calloc(uptr NMemB, uptr Size) {
+    if (UNLIKELY(!ThreadInited))
+      initThread();
+    uptr Total = NMemB * Size;
+    if (Size != 0 && Total / Size != NMemB) // Overflow check
+      return BackendAllocator.ReturnNullOrDie();
+    void *Ptr = allocate(Total, MinAlignment, FromMalloc);
+    // If ZeroContents, the content of the chunk has already been zero'd out.
+    if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))
+      memset(Ptr, 0, getUsableSize(Ptr));
+    return Ptr;
+  }
+
+  void drainQuarantine() {
+    AllocatorQuarantine.Drain(&ThreadQuarantineCache,
+                              QuarantineCallback(&Cache));
+  }
+};
+
+static Allocator Instance(LINKER_INITIALIZED);
+
+static ScudoAllocator &getAllocator() {
+  return Instance.BackendAllocator;
+}
+
+void initAllocator(const AllocatorOptions &Options) {
+  Instance.init(Options);
+}
+
+void drainQuarantine() {
+  Instance.drainQuarantine();
+}
+
+void *scudoMalloc(uptr Size, AllocType Type) {
+  return Instance.allocate(Size, Allocator::MinAlignment, Type);
+}
+
+void scudoFree(void *Ptr, AllocType Type) {
+  Instance.deallocate(Ptr, 0, Type);
+}
+
+void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) {
+  Instance.deallocate(Ptr, Size, Type);
+}
+
+void *scudoRealloc(void *Ptr, uptr Size) {
+  if (!Ptr)
+    return Instance.allocate(Size, Allocator::MinAlignment, FromMalloc);
+  if (Size == 0) {
+    Instance.deallocate(Ptr, 0, FromMalloc);
+    return nullptr;
+  }
+  return Instance.reallocate(Ptr, Size);
+}
+
+void *scudoCalloc(uptr NMemB, uptr Size) {
+  return Instance.calloc(NMemB, Size);
+}
+
+void *scudoValloc(uptr Size) {
+  return Instance.allocate(Size, GetPageSizeCached(), FromMemalign);
+}
+
+void *scudoMemalign(uptr Alignment, uptr Size) {
+  return Instance.allocate(Size, Alignment, FromMemalign);
+}
+
+void *scudoPvalloc(uptr Size) {
+  uptr PageSize = GetPageSizeCached();
+  Size = RoundUpTo(Size, PageSize);
+  if (Size == 0) {
+    // pvalloc(0) should allocate one page.
+    Size = PageSize;
+  }
+  return Instance.allocate(Size, PageSize, FromMemalign);
+}
+
+int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) {
+  *MemPtr = Instance.allocate(Size, Alignment, FromMemalign);
+  return 0;
+}
+
+void *scudoAlignedAlloc(uptr Alignment, uptr Size) {
+  // size must be a multiple of the alignment. To avoid a division, we first
+  // make sure that alignment is a power of 2.
+  CHECK(IsPowerOfTwo(Alignment));
+  CHECK_EQ((Size & (Alignment - 1)), 0);
+  return Instance.allocate(Size, Alignment, FromMalloc);
+}
+
+uptr scudoMallocUsableSize(void *Ptr) {
+  return Instance.getUsableSize(Ptr);
+}
+
+} // namespace __scudo
+
+using namespace __scudo;
+
+// MallocExtension helper functions
+
+uptr __sanitizer_get_current_allocated_bytes() {
+  uptr stats[AllocatorStatCount];
+  getAllocator().GetStats(stats);
+  return stats[AllocatorStatAllocated];
+}
+
+uptr __sanitizer_get_heap_size() {
+  uptr stats[AllocatorStatCount];
+  getAllocator().GetStats(stats);
+  return stats[AllocatorStatMapped];
+}
+
+uptr __sanitizer_get_free_bytes() {
+  return 1;
+}
+
+uptr __sanitizer_get_unmapped_bytes() {
+  return 1;
+}
+
+uptr __sanitizer_get_estimated_allocated_size(uptr size) {
+  return size;
+}
+
+int __sanitizer_get_ownership(const void *p) {
+  return Instance.getUsableSize(p) != 0;
+}
+
+uptr __sanitizer_get_allocated_size(const void *p) {
+  return Instance.getUsableSize(p);
+}
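
A note for readers following the header layout above: the 20-bit Offset field
is sized exactly for the worst case that Allocator::init() verifies. A small
standalone sketch of that arithmetic (hypothetical, not part of the patch; the
constants mirror MinAlignmentLog, MaxAlignmentLog and ChunkHeaderSize from
scudo_allocator.cpp):

  #include <cstdint>

  constexpr uint64_t MinAlignmentLog = 4;        // minimum alignment: 16 bytes
  constexpr uint64_t MaxAlignment = 1ULL << 24;  // maximum alignment: 16 MB
  constexpr uint64_t ChunkHeaderSize = 16;       // 128-bit packed header

  // Worst case: the backend allocation is already aligned on MaxAlignment, so
  // the chunk header ends up MaxAlignment - ChunkHeaderSize bytes into the
  // allocation, counted in units of the minimum alignment.
  constexpr uint64_t MaximumOffset =
      (MaxAlignment - ChunkHeaderSize) >> MinAlignmentLog;

  static_assert(MaximumOffset == (1ULL << 20) - 1,
                "the maximum offset must fit in the 20-bit Offset bit-field");

  int main() { return 0; }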

Added: compiler-rt/trunk/lib/scudo/scudo_allocator.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/scudo_allocator.h?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/lib/scudo/scudo_allocator.h (added)
+++ compiler-rt/trunk/lib/scudo/scudo_allocator.h Mon Jun  6 20:20:26 2016
@@ -0,0 +1,63 @@
+//===-- scudo_allocator.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_allocator.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_H_
+#define SCUDO_ALLOCATOR_H_
+
+#ifndef __x86_64__
+# error "The Scudo hardened allocator currently only supports x86_64."
+#endif
+
+#include "scudo_flags.h"
+
+#include "sanitizer_common/sanitizer_allocator.h"
+
+namespace __scudo {
+
+enum AllocType : u8 {
+  FromMalloc    = 0, // Memory block came from malloc, realloc, calloc, etc.
+  FromNew       = 1, // Memory block came from operator new.
+  FromNewArray  = 2, // Memory block came from operator new [].
+  FromMemalign  = 3, // Memory block came from memalign, posix_memalign, etc.
+};
+
+struct AllocatorOptions {
+  u32 QuarantineSizeMb;
+  u32 ThreadLocalQuarantineSizeKb;
+  bool MayReturnNull;
+  bool DeallocationTypeMismatch;
+  bool DeleteSizeMismatch;
+  bool ZeroContents;
+
+  void setFrom(const Flags *f, const CommonFlags *cf);
+  void copyTo(Flags *f, CommonFlags *cf) const;
+};
+
+void initAllocator(const AllocatorOptions &options);
+void drainQuarantine();
+
+void *scudoMalloc(uptr Size, AllocType Type);
+void scudoFree(void *Ptr, AllocType Type);
+void scudoSizedFree(void *Ptr, uptr Size, AllocType Type);
+void *scudoRealloc(void *Ptr, uptr Size);
+void *scudoCalloc(uptr NMemB, uptr Size);
+void *scudoMemalign(uptr Alignment, uptr Size);
+void *scudoValloc(uptr Size);
+void *scudoPvalloc(uptr Size);
+int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size);
+void *scudoAlignedAlloc(uptr Alignment, uptr Size);
+uptr scudoMallocUsableSize(void *Ptr);
+
+} // namespace __scudo
+
+#endif  // SCUDO_ALLOCATOR_H_

Added: compiler-rt/trunk/lib/scudo/scudo_flags.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/scudo_flags.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/lib/scudo/scudo_flags.cpp (added)
+++ compiler-rt/trunk/lib/scudo/scudo_flags.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,81 @@
+//===-- scudo_flags.cpp -----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Hardened Allocator flag parsing logic.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_flags.h"
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+
+namespace __scudo {
+
+Flags scudo_flags_dont_use_directly;  // use via flags().
+
+void Flags::setDefaults() {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
+#include "scudo_flags.inc"
+#undef SCUDO_FLAG
+}
+
+static void RegisterScudoFlags(FlagParser *parser, Flags *f) {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) \
+  RegisterFlag(parser, #Name, Description, &f->Name);
+#include "scudo_flags.inc"
+#undef SCUDO_FLAG
+}
+
+void initFlags() {
+  SetCommonFlagsDefaults();
+  {
+    CommonFlags cf;
+    cf.CopyFrom(*common_flags());
+    cf.exitcode = 1;
+    OverrideCommonFlags(cf);
+  }
+  Flags *f = getFlags();
+  f->setDefaults();
+
+  FlagParser scudo_parser;
+  RegisterScudoFlags(&scudo_parser, f);
+  RegisterCommonFlags(&scudo_parser);
+
+  scudo_parser.ParseString(GetEnv("SCUDO_OPTIONS"));
+
+  InitializeCommonFlags();
+
+  // Sanity checks and default settings for the Quarantine parameters.
+
+  if (f->QuarantineSizeMb < 0) {
+    const int DefaultQuarantineSizeMb = 64;
+    f->QuarantineSizeMb = DefaultQuarantineSizeMb;
+  }
+  // We enforce an upper limit for the quarantine size of 4Gb.
+  if (f->QuarantineSizeMb > (4 * 1024)) {
+    dieWithMessage("ERROR: the quarantine size is too large\n");
+  }
+  if (f->ThreadLocalQuarantineSizeKb < 0) {
+    const int DefaultThreadLocalQuarantineSizeKb = 1024;
+    f->ThreadLocalQuarantineSizeKb = DefaultThreadLocalQuarantineSizeKb;
+  }
+  // And an upper limit of 128Mb for the thread quarantine cache.
+  if (f->ThreadLocalQuarantineSizeKb > (128 * 1024)) {
+    dieWithMessage("ERROR: the per thread quarantine cache size is too "
+                   "large\n");
+  }
+}
+
+Flags *getFlags() {
+  return &scudo_flags_dont_use_directly;
+}
+
+} // namespace __scudo

Added: compiler-rt/trunk/lib/scudo/scudo_flags.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/scudo_flags.h?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/lib/scudo/scudo_flags.h (added)
+++ compiler-rt/trunk/lib/scudo/scudo_flags.h Mon Jun  6 20:20:26 2016
@@ -0,0 +1,33 @@
+//===-- scudo_flags.h -------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_flags.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAGS_H_
+#define SCUDO_FLAGS_H_
+
+namespace __scudo {
+
+struct Flags {
+#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name;
+#include "scudo_flags.inc"
+#undef SCUDO_FLAG
+
+  void setDefaults();
+};
+
+Flags *getFlags();
+
+void initFlags();
+
+} // namespace __scudo
+
+#endif  // SCUDO_FLAGS_H_

Added: compiler-rt/trunk/lib/scudo/scudo_flags.inc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/scudo_flags.inc?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/lib/scudo/scudo_flags.inc (added)
+++ compiler-rt/trunk/lib/scudo/scudo_flags.inc Mon Jun  6 20:20:26 2016
@@ -0,0 +1,35 @@
+//===-- scudo_flags.inc -----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Hardened Allocator runtime flags.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_FLAG
+# error "Define SCUDO_FLAG prior to including this file!"
+#endif
+
+SCUDO_FLAG(int, QuarantineSizeMb, 64,
+           "Size (in Mb) of the quarantine used to delay the actual "
+           "deallocation of chunks. A lower value may reduce memory usage "
+           "but also decrease the effectiveness of the mitigation.")
+
+SCUDO_FLAG(int, ThreadLocalQuarantineSizeKb, 1024,
+           "Size (in Kb) of the per-thread cache used to offload the global "
+           "quarantine. A lower value may reduce memory usage but might "
+           "increase contention on the global quarantine.")
+
+SCUDO_FLAG(bool, DeallocationTypeMismatch, true,
+           "Report errors on malloc/delete, new/free, new/delete[], etc.")
+
+SCUDO_FLAG(bool, DeleteSizeMismatch, true,
+           "Report errors on mismatch between the size of new and delete.")
+
+SCUDO_FLAG(bool, ZeroContents, false,
+           "Zero chunk contents on allocation and deallocation.")

Added: compiler-rt/trunk/lib/scudo/scudo_interceptors.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/scudo_interceptors.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/lib/scudo/scudo_interceptors.cpp (added)
+++ compiler-rt/trunk/lib/scudo/scudo_interceptors.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,75 @@
+//===-- scudo_interceptors.cpp ----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Linux specific malloc interception functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+#if SANITIZER_LINUX
+
+#include "scudo_allocator.h"
+
+#include "interception/interception.h"
+
+using namespace __scudo;
+
+INTERCEPTOR(void, free, void *ptr) {
+  scudoFree(ptr, FromMalloc);
+}
+
+INTERCEPTOR(void, cfree, void *ptr) {
+  scudoFree(ptr, FromMalloc);
+}
+
+INTERCEPTOR(void*, malloc, uptr size) {
+  return scudoMalloc(size, FromMalloc);
+}
+
+INTERCEPTOR(void*, realloc, void *ptr, uptr size) {
+  return scudoRealloc(ptr, size);
+}
+
+INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) {
+  return scudoCalloc(nmemb, size);
+}
+
+INTERCEPTOR(void*, valloc, uptr size) {
+  return scudoValloc(size);
+}
+
+INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
+  return scudoMemalign(alignment, size);
+}
+
+INTERCEPTOR(void*, __libc_memalign, uptr alignment, uptr size) {
+  return scudoMemalign(alignment, size);
+}
+
+INTERCEPTOR(void*, pvalloc, uptr size) {
+  return scudoPvalloc(size);
+}
+
+INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) {
+  return scudoAlignedAlloc(alignment, size);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) {
+  return scudoPosixMemalign(memptr, alignment, size);
+}
+
+INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
+  return scudoMallocUsableSize(ptr);
+}
+
+INTERCEPTOR(int, mallopt, int cmd, int value) {
+  return -1;
+}
+
+#endif // SANITIZER_LINUX

Added: compiler-rt/trunk/lib/scudo/scudo_new_delete.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/scudo_new_delete.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/lib/scudo/scudo_new_delete.cpp (added)
+++ compiler-rt/trunk/lib/scudo/scudo_new_delete.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,69 @@
+//===-- scudo_new_delete.cpp ------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Interceptors for operators new and delete.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_allocator.h"
+
+#include "interception/interception.h"
+
+#include <cstddef>
+
+using namespace __scudo;
+
+#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE
+
+// Fake std::nothrow_t to avoid including <new>.
+namespace std {
+struct nothrow_t {};
+} // namespace std
+
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size) {
+  return scudoMalloc(size, FromNew);
+}
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size) {
+  return scudoMalloc(size, FromNewArray);
+}
+CXX_OPERATOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const&) {
+  return scudoMalloc(size, FromNew);
+}
+CXX_OPERATOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const&) {
+  return scudoMalloc(size, FromNewArray);
+}
+
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr) NOEXCEPT {
+  return scudoFree(ptr, FromNew);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT {
+  return scudoFree(ptr, FromNewArray);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const&) NOEXCEPT {
+  return scudoFree(ptr, FromNew);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const&) NOEXCEPT {
+  return scudoFree(ptr, FromNewArray);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size) NOEXCEPT {
+  scudoSizedFree(ptr, size, FromNew);
+}
+CXX_OPERATOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT {
+  scudoSizedFree(ptr, size, FromNewArray);
+}

Added: compiler-rt/trunk/lib/scudo/scudo_termination.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/scudo_termination.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/lib/scudo/scudo_termination.cpp (added)
+++ compiler-rt/trunk/lib/scudo/scudo_termination.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,41 @@
+//===-- scudo_termination.cpp -----------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// This file contains bare-bones termination functions to replace the
+/// __sanitizer ones, in order to avoid any potential abuse of the callback
+/// functionality.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __sanitizer {
+
+bool AddDieCallback(DieCallbackType callback) { return true; }
+
+bool RemoveDieCallback(DieCallbackType callback) { return true; }
+
+void SetUserDieCallback(DieCallbackType callback) {}
+
+void NORETURN Die() {
+  if (common_flags()->abort_on_error)
+    Abort();
+  internal__exit(common_flags()->exitcode);
+}
+
+void SetCheckFailedCallback(CheckFailedCallbackType callback) {}
+
+void NORETURN CheckFailed(const char *file, int line, const char *cond,
+                          u64 v1, u64 v2) {
+  Report("Sanitizer CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond,
+                                                            v1, v2);
+  Die();
+}
+
+} // namespace __sanitizer

Added: compiler-rt/trunk/lib/scudo/scudo_utils.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/scudo_utils.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/lib/scudo/scudo_utils.cpp (added)
+++ compiler-rt/trunk/lib/scudo/scudo_utils.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,133 @@
+//===-- scudo_utils.cpp -----------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Platform specific utility functions.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_utils.h"
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdarg.h>
+#include <unistd.h>
+
+#include <cstring>
+
+// TODO(kostyak): remove __sanitizer *Printf uses in favor of our own less
+//                complicated string formatting code. The following is a
+//                temporary workaround to be able to use __sanitizer::VSNPrintf.
+namespace __sanitizer {
+
+extern int VSNPrintf(char *buff, int buff_length, const char *format,
+                     va_list args);
+
+} // namespace __sanitizer
+
+namespace __scudo {
+
+FORMAT(1, 2)
+void dieWithMessage(const char *Format, ...) {
+  // Our messages are tiny, 128 characters is more than enough.
+  char Message[128];
+  va_list Args;
+  va_start(Args, Format);
+  __sanitizer::VSNPrintf(Message, sizeof(Message), Format, Args);
+  va_end(Args);
+  RawWrite(Message);
+  Die();
+}
+
+typedef struct {
+  u32 Eax;
+  u32 Ebx;
+  u32 Ecx;
+  u32 Edx;
+} CPUIDInfo;
+
+static void getCPUID(CPUIDInfo *info, u32 leaf, u32 subleaf)
+{
+  asm volatile("cpuid"
+      : "=a" (info->Eax), "=b" (info->Ebx), "=c" (info->Ecx), "=d" (info->Edx)
+      : "a" (leaf), "c" (subleaf)
+  );
+}
+
+// Returns true if the CPU is a "GenuineIntel" or "AuthenticAMD"
+static bool isSupportedCPU()
+{
+  CPUIDInfo Info;
+
+  getCPUID(&Info, 0, 0);
+  if (memcmp(reinterpret_cast<char *>(&Info.Ebx), "Genu", 4) == 0 &&
+      memcmp(reinterpret_cast<char *>(&Info.Edx), "ineI", 4) == 0 &&
+      memcmp(reinterpret_cast<char *>(&Info.Ecx), "ntel", 4) == 0) {
+      return true;
+  }
+  if (memcmp(reinterpret_cast<char *>(&Info.Ebx), "Auth", 4) == 0 &&
+      memcmp(reinterpret_cast<char *>(&Info.Edx), "enti", 4) == 0 &&
+      memcmp(reinterpret_cast<char *>(&Info.Ecx), "cAMD", 4) == 0) {
+      return true;
+  }
+  return false;
+}
+
+bool testCPUFeature(CPUFeature feature)
+{
+  static bool InfoInitialized = false;
+  static CPUIDInfo CPUInfo = {};
+
+  if (!InfoInitialized) {
+    if (isSupportedCPU())
+      getCPUID(&CPUInfo, 1, 0);
+    else
+      UNIMPLEMENTED();
+    InfoInitialized = true;
+  }
+  switch (feature) {
+    case SSE4_2:
+      return ((CPUInfo.Ecx >> 20) & 0x1) != 0;
+    default:
+      break;
+  }
+  return false;
+}
+
+// readRetry will attempt to read Count bytes from the Fd specified, and if
+// interrupted will retry to read additional bytes to reach Count.
+static ssize_t readRetry(int Fd, u8 *Buffer, size_t Count) {
+  ssize_t AmountRead = 0;
+  while (static_cast<size_t>(AmountRead) < Count) {
+    ssize_t Result = read(Fd, Buffer + AmountRead, Count - AmountRead);
+    if (Result > 0)
+      AmountRead += Result;
+    else if (!Result)
+      break;
+    else if (errno != EINTR) {
+      AmountRead = -1;
+      break;
+    }
+  }
+  return AmountRead;
+}
+
+// Default constructor for Xorshift128Plus seeds the state with /dev/urandom
+Xorshift128Plus::Xorshift128Plus() {
+  int Fd = open("/dev/urandom", O_RDONLY);
+  bool Success = readRetry(Fd, reinterpret_cast<u8 *>(&State_0_),
+                           sizeof(State_0_)) == sizeof(State_0_);
+  Success &= readRetry(Fd, reinterpret_cast<u8 *>(&State_1_),
+                           sizeof(State_1_)) == sizeof(State_1_);
+  close(Fd);
+  if (!Success) {
+    dieWithMessage("ERROR: failed to read enough data from /dev/urandom.\n");
+  }
+}
+
+} // namespace __scudo

Added: compiler-rt/trunk/lib/scudo/scudo_utils.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/scudo/scudo_utils.h?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/lib/scudo/scudo_utils.h (added)
+++ compiler-rt/trunk/lib/scudo/scudo_utils.h Mon Jun  6 20:20:26 2016
@@ -0,0 +1,59 @@
+//===-- scudo_utils.h -------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_utils.cpp.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_UTILS_H_
+#define SCUDO_UTILS_H_
+
+#include <string.h>
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __scudo {
+
+template <class Dest, class Source>
+inline Dest bit_cast(const Source& source) {
+  static_assert(sizeof(Dest) == sizeof(Source), "Sizes are not equal!");
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
+
+void dieWithMessage(const char *Format, ...);
+
+enum CPUFeature {
+  SSE4_2 = 0,
+  ENUM_CPUFEATURE_MAX
+};
+bool testCPUFeature(CPUFeature feature);
+
+// Tiny PRNG based on https://en.wikipedia.org/wiki/Xorshift#xorshift.2B
+// The state (128 bits) will be stored in thread local storage.
+struct Xorshift128Plus {
+ public:
+  Xorshift128Plus();
+  u64 Next() {
+    u64 x = State_0_;
+    const u64 y = State_1_;
+    State_0_ = y;
+    x ^= x << 23;
+    State_1_ = x ^ y ^ (x >> 17) ^ (y >> 26);
+    return State_1_ + y;
+  }
+ private:
+  u64 State_0_;
+  u64 State_1_;
+};
+
+} // namespace __scudo
+
+#endif  // SCUDO_UTILS_H_

Modified: compiler-rt/trunk/test/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/CMakeLists.txt?rev=271968&r1=271967&r2=271968&view=diff
==============================================================================
--- compiler-rt/trunk/test/CMakeLists.txt (original)
+++ compiler-rt/trunk/test/CMakeLists.txt Mon Jun  6 20:20:26 2016
@@ -73,6 +73,9 @@ if(COMPILER_RT_CAN_EXECUTE_TESTS)
   if(COMPILER_RT_HAS_ESAN)
     add_subdirectory(esan)
   endif()
+  if(COMPILER_RT_HAS_SCUDO)
+    add_subdirectory(scudo)
+  endif()
 endif()
 
 if(COMPILER_RT_STANDALONE_BUILD)

Added: compiler-rt/trunk/test/scudo/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/scudo/CMakeLists.txt?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/test/scudo/CMakeLists.txt (added)
+++ compiler-rt/trunk/test/scudo/CMakeLists.txt Mon Jun  6 20:20:26 2016
@@ -0,0 +1,28 @@
+set(SCUDO_LIT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
+set(SCUDO_LIT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
+
+
+set(SCUDO_TEST_DEPS ${SANITIZER_COMMON_LIT_TEST_DEPS})
+if(NOT COMPILER_RT_STANDALONE_BUILD)
+  list(APPEND SCUDO_TEST_DEPS scudo)
+endif()
+
+configure_lit_site_cfg(
+  ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in
+  ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg
+  )
+
+if(CMAKE_SYSTEM_NAME MATCHES "Linux")
+   EXEC_PROGRAM(cat ARGS "/proc/cpuinfo" OUTPUT_VARIABLE CPUINFO)
+   STRING(REGEX REPLACE "^.*(sse4_2).*$" "\\1" SSE_THERE ${CPUINFO})
+   STRING(COMPARE EQUAL "sse4_2" "${SSE_THERE}" SSE42_TRUE)
+endif(CMAKE_SYSTEM_NAME MATCHES "Linux")
+
+if (SSE42_TRUE AND CMAKE_SIZEOF_VOID_P EQUAL 8)
+  add_lit_testsuite(check-scudo
+    "Running the Scudo Hardened Allocator tests"
+    ${CMAKE_CURRENT_BINARY_DIR}
+    DEPENDS ${SCUDO_TEST_DEPS})
+  set_target_properties(check-scudo PROPERTIES FOLDER
+    "Scudo Hardened Allocator tests")
+endif(SSE42_TRUE AND CMAKE_SIZEOF_VOID_P EQUAL 8)

Added: compiler-rt/trunk/test/scudo/alignment.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/scudo/alignment.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/test/scudo/alignment.cpp (added)
+++ compiler-rt/trunk/test/scudo/alignment.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,25 @@
+// RUN: %clang_scudo %s -o %t
+// RUN: not %run %t pointers 2>&1 | FileCheck %s
+
+// Tests that a non-16-byte aligned pointer will trigger the associated error
+// on deallocation.
+
+#include <assert.h>
+#include <malloc.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+
+int main(int argc, char **argv)
+{
+  assert(argc == 2);
+  if (!strcmp(argv[1], "pointers")) {
+    void *p = malloc(1U << 16);
+    if (!p)
+      return 1;
+    free(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(p) | 8));
+  }
+  return 0;
+}
+
+// CHECK: ERROR: attempted to deallocate a chunk not properly aligned

Added: compiler-rt/trunk/test/scudo/double-free.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/scudo/double-free.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/test/scudo/double-free.cpp (added)
+++ compiler-rt/trunk/test/scudo/double-free.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,49 @@
+// RUN: %clang_scudo %s -o %t
+// RUN: not %run %t malloc   2>&1 | FileCheck %s
+// RUN: not %run %t new      2>&1 | FileCheck %s
+// RUN: not %run %t newarray 2>&1 | FileCheck %s
+// RUN: not %run %t memalign 2>&1 | FileCheck %s
+
+// Tests double-free error on pointers allocated with different allocation
+// functions.
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+int main(int argc, char **argv)
+{
+  assert(argc == 2);
+  if (!strcmp(argv[1], "malloc")) {
+    void *p = malloc(sizeof(int));
+    if (!p)
+      return 1;
+    free(p);
+    free(p);
+  }
+  if (!strcmp(argv[1], "new")) {
+    int *p = new int;
+    if (!p)
+      return 1;
+    delete p;
+    delete p;
+  }
+  if (!strcmp(argv[1], "newarray")) {
+    int *p = new int[8];
+    if (!p)
+      return 1;
+    delete[] p;
+    delete[] p;
+  }
+  if (!strcmp(argv[1], "memalign")) {
+    void *p = nullptr;
+    posix_memalign(&p, 0x100, sizeof(int));
+    if (!p)
+      return 1;
+    free(p);
+    free(p);
+  }
+  return 0;
+}
+
+// CHECK: ERROR: invalid chunk state when deallocating address

Added: compiler-rt/trunk/test/scudo/lit.cfg
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/scudo/lit.cfg?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/test/scudo/lit.cfg (added)
+++ compiler-rt/trunk/test/scudo/lit.cfg Mon Jun  6 20:20:26 2016
@@ -0,0 +1,39 @@
+# -*- Python -*-
+
+import os
+
+# Setup config name.
+config.name = 'Scudo'
+
+# Setup source root.
+config.test_source_root = os.path.dirname(__file__)
+
+# Path to the static library
+base_lib = os.path.join(config.compiler_rt_libdir,
+                        "libclang_rt.scudo-%s.a" % config.target_arch)
+whole_archive = "-Wl,-whole-archive %s -Wl,-no-whole-archive " % base_lib
+
+# Test suffixes.
+config.suffixes = ['.c', '.cc', '.cpp', '.m', '.mm', '.ll', '.test']
+
+# C flags.
+c_flags = ["-std=c++11",
+           "-lstdc++",
+           "-ldl",
+           "-lrt",
+           "-pthread",
+           "-latomic",
+           "-fPIE",
+           "-pie",
+           "-O0"]
+
+def build_invocation(compile_flags):
+  return " " + " ".join([config.clang] + compile_flags) + " "
+
+# Add clang substitutions.
+config.substitutions.append( ("%clang_scudo ",
+                              build_invocation(c_flags) + whole_archive) )
+
+# Hardened Allocator tests are currently supported on Linux only.
+if config.host_os not in ['Linux']:
+   config.unsupported = True

Added: compiler-rt/trunk/test/scudo/lit.site.cfg.in
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/scudo/lit.site.cfg.in?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/test/scudo/lit.site.cfg.in (added)
+++ compiler-rt/trunk/test/scudo/lit.site.cfg.in Mon Jun  6 20:20:26 2016
@@ -0,0 +1,7 @@
+@LIT_SITE_CFG_IN_HEADER@
+
+# Load common config for all compiler-rt lit tests.
+lit_config.load_config(config, "@COMPILER_RT_BINARY_DIR@/test/lit.common.configured")
+
+# Load tool-specific config that would do the real work.
+lit_config.load_config(config, "@SCUDO_LIT_SOURCE_DIR@/lit.cfg")

Added: compiler-rt/trunk/test/scudo/malloc.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/scudo/malloc.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/test/scudo/malloc.cpp (added)
+++ compiler-rt/trunk/test/scudo/malloc.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,27 @@
+// RUN: %clang_scudo %s -o %t
+// RUN: %run %t 2>&1
+
+// Tests that a regular workflow of allocation, memory fill and free works as
+// intended. Also tests that a zero-sized allocation succeeds.
+
+#include <malloc.h>
+#include <stdlib.h>
+#include <string.h>
+
+int main(int argc, char **argv)
+{
+  void *p;
+  size_t size = 1U << 8;
+
+  p = malloc(size);
+  if (!p)
+    return 1;
+  memset(p, 'A', size);
+  free(p);
+  p = malloc(0);
+  if (!p)
+    return 1;
+  free(p);
+
+  return 0;
+}

Added: compiler-rt/trunk/test/scudo/memalign.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/scudo/memalign.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/test/scudo/memalign.cpp (added)
+++ compiler-rt/trunk/test/scudo/memalign.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,42 @@
+// RUN: %clang_scudo %s -o %t
+// RUN:     %run %t valid   2>&1
+// RUN: not %run %t invalid 2>&1 | FileCheck %s
+
+// Tests that the various aligned allocation functions work as intended. Also
+// tests for the condition where the alignment is not a power of 2.
+
+#include <assert.h>
+#include <malloc.h>
+#include <stdlib.h>
+#include <string.h>
+
+int main(int argc, char **argv)
+{
+  void *p;
+  size_t alignment = 1U << 12;
+  size_t size = alignment;
+
+  assert(argc == 2);
+  if (!strcmp(argv[1], "valid")) {
+    p = memalign(alignment, size);
+    if (!p)
+      return 1;
+    free(p);
+    p = nullptr;
+    posix_memalign(&p, alignment, size);
+    if (!p)
+      return 1;
+    free(p);
+    p = aligned_alloc(alignment, size);
+    if (!p)
+      return 1;
+    free(p);
+  }
+  if (!strcmp(argv[1], "invalid")) {
+    p = memalign(alignment - 1, size);
+    free(p);
+  }
+  return 0;
+}
+
+// CHECK: ERROR: malloc alignment is not a power of 2
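
The power-of-two check behind the error above is the classic bit trick: an
alignment A is a power of two iff A is non-zero and (A & (A - 1)) == 0. A minimal
illustration, not the scudo code:

// Illustrative power-of-two validation for aligned allocation requests.
#include <cstdio>
#include <cstdlib>

static bool isPowerOfTwo(size_t Alignment) {
  return Alignment != 0 && (Alignment & (Alignment - 1)) == 0;
}

int main() {
  size_t Alignment = (1U << 12) - 1;  // 0xfff, as in the "invalid" case above
  if (!isPowerOfTwo(Alignment)) {
    fprintf(stderr, "ERROR: malloc alignment is not a power of 2\n");
    return 1;
  }
  return 0;
}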

Added: compiler-rt/trunk/test/scudo/mismatch.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/scudo/mismatch.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/test/scudo/mismatch.cpp (added)
+++ compiler-rt/trunk/test/scudo/mismatch.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,41 @@
+// RUN: %clang_scudo %s -o %t
+// RUN: SCUDO_OPTIONS=DeallocationTypeMismatch=1 not %run %t mallocdel   2>&1 | FileCheck %s
+// RUN: SCUDO_OPTIONS=DeallocationTypeMismatch=0     %run %t mallocdel   2>&1
+// RUN: SCUDO_OPTIONS=DeallocationTypeMismatch=1 not %run %t newfree     2>&1 | FileCheck %s
+// RUN: SCUDO_OPTIONS=DeallocationTypeMismatch=0     %run %t newfree     2>&1
+// RUN: SCUDO_OPTIONS=DeallocationTypeMismatch=1 not %run %t memaligndel 2>&1 | FileCheck %s
+// RUN: SCUDO_OPTIONS=DeallocationTypeMismatch=0     %run %t memaligndel 2>&1
+
+// Tests that type mismatches between allocation and deallocation functions are
+// caught when the related option is set.
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+#include <malloc.h>
+
+int main(int argc, char **argv)
+{
+  assert(argc == 2);
+  if (!strcmp(argv[1], "mallocdel")) {
+    int *p = (int *)malloc(16);
+    if (!p)
+      return 1;
+    delete p;
+  }
+  if (!strcmp(argv[1], "newfree")) {
+    int *p = new int;
+    if (!p)
+      return 1;
+    free((void *)p);
+  }
+  if (!strcmp(argv[1], "memaligndel")) {
+    int *p = (int *)memalign(0x10, 0x10);
+    if (!p)
+      return 1;
+    delete p;
+  }
+  return 0;
+}
+
+// CHECK: ERROR: allocation type mismatch on address

Added: compiler-rt/trunk/test/scudo/overflow.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/scudo/overflow.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/test/scudo/overflow.cpp (added)
+++ compiler-rt/trunk/test/scudo/overflow.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,38 @@
+// RUN: %clang_scudo %s -o %t
+// RUN:                                  not %run %t malloc     2>&1 | FileCheck %s
+// RUN: SCUDO_OPTIONS=QuarantineSizeMb=1 not %run %t quarantine 2>&1 | FileCheck %s
+
+// Tests that header corruption of an allocated or quarantined chunk is caught.
+
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+int main(int argc, char **argv)
+{
+  assert(argc == 2);
+  if (!strcmp(argv[1], "malloc")) {
+    // Simulate a header corruption of an allocated chunk (1-bit)
+    void *p = malloc(1U << 4);
+    if (!p)
+      return 1;
+    ((char *)p)[-1] ^= 1;
+    free(p);
+  }
+  if (!strcmp(argv[1], "quarantine")) {
+    void *p = malloc(1U << 4);
+    if (!p)
+      return 1;
+    free(p);
+    // Simulate a header corruption of a quarantined chunk
+    ((char *)p)[-2] ^= 1;
+    // Trigger the quarantine recycle
+    for (int i = 0; i < 0x100; i++) {
+      p = malloc(1U << 16);
+      free(p);
+    }
+  }
+  return 0;
+}
+
+// CHECK: ERROR: corrupted chunk header at address
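
Both cases above flip a single bit in the bytes immediately preceding the user
pointer, i.e. in the chunk header, and rely on the checksum-protected header
mentioned in the commit summary to catch the corruption the next time the header
is read. A rough sketch of the idea follows; the field layout and mixing function
are invented for the example and are not the actual scudo header:

// Illustrative checksum-protected chunk header (layout and checksum are made up).
#include <cstdint>
#include <cstdio>
#include <cstring>

struct ChunkHeader {
  uint64_t State    : 2;   // available / allocated / quarantined
  uint64_t Size     : 46;
  uint64_t Checksum : 16;  // covers the other fields, ideally keyed by a secret
};

static uint16_t computeChecksum(ChunkHeader H) {
  H.Checksum = 0;
  uint64_t Bits;
  memcpy(&Bits, &H, sizeof(Bits));
  Bits ^= Bits >> 32;                  // placeholder mixing, not a real checksum
  Bits *= 0x9E3779B97F4A7C15ULL;
  return static_cast<uint16_t>(Bits >> 48);
}

int main() {
  ChunkHeader H = {};
  H.State = 1;
  H.Size = 16;
  H.Checksum = computeChecksum(H);

  // Simulate the 1-bit header corruption performed by the test above.
  reinterpret_cast<char *>(&H)[0] ^= 1;
  if (H.Checksum != computeChecksum(H)) {
    fprintf(stderr, "ERROR: corrupted chunk header at address %p\n", (void *)&H);
    return 1;
  }
  return 0;
}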

Added: compiler-rt/trunk/test/scudo/preinit.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/scudo/preinit.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/test/scudo/preinit.cpp (added)
+++ compiler-rt/trunk/test/scudo/preinit.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,38 @@
+// RUN: %clang_scudo %s -o %t
+// RUN: %run %t 2>&1
+
+// Verifies that calling malloc in a preinit_array function succeeds, and that
+// the resulting pointer can be freed at program termination.
+
+#include <malloc.h>
+#include <stdlib.h>
+#include <string.h>
+
+static void *global_p = nullptr;
+
+void __init(void) {
+  global_p = malloc(1);
+  if (!global_p)
+    exit(1);
+}
+
+void __fini(void) {
+  if (global_p)
+    free(global_p);
+}
+
+int main(int argc, char **argv)
+{
+  void *p = malloc(1);
+  if (!p)
+    return 1;
+  free(p);
+
+  return 0;
+}
+
+__attribute__((section(".preinit_array"), used))
+  void (*__local_preinit)(void) = __init;
+__attribute__((section(".fini_array"), used))
+  void (*__local_fini)(void) = __fini;
+

Added: compiler-rt/trunk/test/scudo/quarantine.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/scudo/quarantine.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/test/scudo/quarantine.cpp (added)
+++ compiler-rt/trunk/test/scudo/quarantine.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,43 @@
+// RUN: %clang_scudo %s -o %t
+// RUN: SCUDO_OPTIONS=QuarantineSizeMb=1 %run %t 2>&1
+
+// Tests that the quarantine prevents a chunk from being reused right away.
+// Also tests that a chunk will eventually become available again for
+// allocation when the recycling criterion has been met.
+
+#include <malloc.h>
+#include <stdlib.h>
+#include <string.h>
+
+int main(int argc, char **argv)
+{
+  void *p, *old_p;
+  size_t size = 1U << 16;
+
+  // The delayed freelist will prevent a chunk from being available right away
+  p = malloc(size);
+  if (!p)
+    return 1;
+  old_p = p;
+  free(p);
+  p = malloc(size);
+  if (!p)
+    return 1;
+  if (old_p == p)
+    return 1;
+  free(p);
+
+  // Eventually the chunk should become available again
+  bool found = false;
+  for (int i = 0; i < 0x100 && found == false; i++) {
+    p = malloc(size);
+    if (!p)
+      return 1;
+    found = (p == old_p);
+    free(p);
+  }
+  if (found == false)
+    return 1;
+
+  return 0;
+}
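
A toy model of the delayed freelist exercised by this test: frees are parked in a
FIFO, so the next allocation of the same size cannot receive the same address, and
only once the quarantine exceeds its byte budget (what QuarantineSizeMb controls)
are the oldest chunks actually released for reuse. This is an illustrative sketch,
not the sanitizer_common quarantine the allocator actually uses:

// Toy delayed freelist (illustrative only).
#include <cstdlib>
#include <deque>
#include <utility>

class ToyQuarantine {
public:
  explicit ToyQuarantine(size_t MaxBytes) : MaxBytes(MaxBytes) {}

  // Called instead of free(): the chunk is parked, not released.
  void put(void *Ptr, size_t Size) {
    Chunks.emplace_back(Ptr, Size);
    Bytes += Size;
    while (Bytes > MaxBytes && !Chunks.empty()) {
      std::pair<void *, size_t> Oldest = Chunks.front();
      Chunks.pop_front();
      Bytes -= Oldest.second;
      free(Oldest.first);  // only now can the address be handed out again
    }
  }

private:
  std::deque<std::pair<void *, size_t>> Chunks;
  size_t Bytes = 0;
  size_t MaxBytes;
};

int main() {
  ToyQuarantine Q(1 << 20);  // 1Mb budget, mirroring QuarantineSizeMb=1
  void *P = malloc(1U << 16);
  Q.put(P, 1U << 16);        // P stays unavailable until the budget is exceeded
  return 0;
}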

Added: compiler-rt/trunk/test/scudo/realloc.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/scudo/realloc.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/test/scudo/realloc.cpp (added)
+++ compiler-rt/trunk/test/scudo/realloc.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,69 @@
+// RUN: %clang_scudo %s -o %t
+// RUN:     %run %t pointers 2>&1
+// RUN:     %run %t contents 2>&1
+// RUN: not %run %t memalign 2>&1 | FileCheck %s
+
+// Tests that our reallocation function returns the same pointer when the
+// requested size can fit into the previously allocated chunk. Also tests that
+// a new chunk is returned if the size is greater, and that the contents of the
+// chunk are left unchanged.
+// As a final test, make sure that a chunk allocated by memalign cannot be
+// reallocated.
+
+#include <assert.h>
+#include <malloc.h>
+#include <string.h>
+
+int main(int argc, char **argv)
+{
+  void *p, *old_p;
+  size_t size = 32;
+
+  assert(argc == 2);
+  if (!strcmp(argv[1], "pointers")) {
+    old_p = p = realloc(nullptr, size);
+    if (!p)
+      return 1;
+    size = malloc_usable_size(p);
+    // Our realloc implementation will return the same pointer if the size
+    // requested is lower or equal to the usable size of the associated chunk.
+    p = realloc(p, size - 1);
+    if (p != old_p)
+      return 1;
+    p = realloc(p, size);
+    if (p != old_p)
+      return 1;
+    // And a new one if the size is greater.
+    p = realloc(p, size + 1);
+    if (p == old_p)
+      return 1;
+    // A size of 0 will free the chunk and return nullptr.
+    p = realloc(p, 0);
+    if (p)
+      return 1;
+    old_p = nullptr;
+  }
+  if (!strcmp(argv[1], "contents")) {
+    p = realloc(nullptr, size);
+    if (!p)
+      return 1;
+    for (int i = 0; i < size; i++)
+      reinterpret_cast<char *>(p)[i] = 'A';
+    p = realloc(p, size + 1);
+    // The contents of the reallocated chunk must match the original one.
+    for (int i = 0; i < size; i++)
+      if (reinterpret_cast<char *>(p)[i] != 'A')
+        return 1;
+  }
+  if (!strcmp(argv[1], "memalign")) {
+    // A chunk coming from memalign cannot be reallocated.
+    p = memalign(16, size);
+    if (!p)
+      return 1;
+    p = realloc(p, size);
+    free(p);
+  }
+  return 0;
+}
+
+// CHECK: ERROR: invalid chunk type when reallocating address

Added: compiler-rt/trunk/test/scudo/sized-delete.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/scudo/sized-delete.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/test/scudo/sized-delete.cpp (added)
+++ compiler-rt/trunk/test/scudo/sized-delete.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,40 @@
+// RUN: %clang_scudo -fsized-deallocation %s -o %t
+// RUN: SCUDO_OPTIONS=DeleteSizeMismatch=1     %run %t gooddel    2>&1
+// RUN: SCUDO_OPTIONS=DeleteSizeMismatch=1 not %run %t baddel     2>&1 | FileCheck %s
+// RUN: SCUDO_OPTIONS=DeleteSizeMismatch=0     %run %t baddel     2>&1
+// RUN: SCUDO_OPTIONS=DeleteSizeMismatch=1     %run %t gooddelarr 2>&1
+// RUN: SCUDO_OPTIONS=DeleteSizeMismatch=1 not %run %t baddelarr  2>&1 | FileCheck %s
+// RUN: SCUDO_OPTIONS=DeleteSizeMismatch=0     %run %t baddelarr  2>&1
+
+// Ensures that the sized delete operator errors out when the appropriate
+// option is passed and the sizes do not match between allocation and
+// deallocation functions.
+
+#include <new>
+#include <assert.h>
+#include <stdlib.h>
+#include <string.h>
+
+int main(int argc, char **argv)
+{
+  assert(argc == 2);
+  if (!strcmp(argv[1], "gooddel")) {
+    long long *p = new long long;
+    operator delete(p, sizeof(long long));
+  }
+  if (!strcmp(argv[1], "baddel")) {
+    long long *p = new long long;
+    operator delete(p, 2);
+  }
+  if (!strcmp(argv[1], "gooddelarr")) {
+    char *p = new char[64];
+    operator delete[](p, 64);
+  }
+  if (!strcmp(argv[1], "baddelarr")) {
+    char *p = new char[63];
+    operator delete[](p, 64);
+  }
+  return 0;
+}
+
+// CHECK: ERROR: invalid sized delete on chunk at address

Added: compiler-rt/trunk/test/scudo/sizes.cpp
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/test/scudo/sizes.cpp?rev=271968&view=auto
==============================================================================
--- compiler-rt/trunk/test/scudo/sizes.cpp (added)
+++ compiler-rt/trunk/test/scudo/sizes.cpp Mon Jun  6 20:20:26 2016
@@ -0,0 +1,61 @@
+// RUN: %clang_scudo %s -o %t
+// RUN: SCUDO_OPTIONS=allocator_may_return_null=0 not %run %t malloc 2>&1 | FileCheck %s
+// RUN: SCUDO_OPTIONS=allocator_may_return_null=1     %run %t malloc 2>&1
+// RUN: SCUDO_OPTIONS=allocator_may_return_null=0 not %run %t calloc 2>&1 | FileCheck %s
+// RUN: SCUDO_OPTIONS=allocator_may_return_null=1     %run %t calloc 2>&1
+// RUN:                                               %run %t usable 2>&1
+
+// Tests for various edge cases related to sizes, notably the maximum size the
+// allocator can allocate. Tests that an integer overflow in the parameters of
+// calloc is caught.
+
+#include <assert.h>
+#include <malloc.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <limits>
+
+int main(int argc, char **argv)
+{
+  assert(argc == 2);
+  if (!strcmp(argv[1], "malloc")) {
+    // Currently the maximum size the allocator can allocate is 1ULL<<40 bytes.
+    size_t size = std::numeric_limits<size_t>::max();
+    void *p = malloc(size);
+    if (p)
+      return 1;
+    size = (1ULL << 40) - 16;
+    p = malloc(size);
+    if (p)
+      return 1;
+  }
+  if (!strcmp(argv[1], "calloc")) {
+    // Trigger an overflow in calloc.
+    size_t size = std::numeric_limits<size_t>::max();
+    void *p = calloc((size / 0x1000) + 1, 0x1000);
+    if (p)
+      return 1;
+  }
+  if (!strcmp(argv[1], "usable")) {
+    // Playing with the actual usable size of a chunk.
+    void *p = malloc(1007);
+    if (!p)
+      return 1;
+    size_t size = malloc_usable_size(p);
+    if (size < 1007)
+      return 1;
+    memset(p, 'A', size);
+    p = realloc(p, 2014);
+    if (!p)
+      return 1;
+    size = malloc_usable_size(p);
+    if (size < 2014)
+      return 1;
+    memset(p, 'B', size);
+    free(p);
+  }
+  return 0;
+}
+
+// CHECK: allocator is terminating the process
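
The calloc case above chooses nmemb and size whose product overflows size_t; the
standard guard is to check the multiplication before allocating. A hypothetical
helper for illustration, not the scudo implementation:

// Illustrative overflow-checked calloc (hypothetical helper).
#include <cstdint>
#include <cstdlib>
#include <cstring>

void *checked_calloc(size_t nmemb, size_t size) {
  if (size != 0 && nmemb > SIZE_MAX / size)
    return nullptr;  // nmemb * size would overflow size_t
  void *p = malloc(nmemb * size);
  if (p)
    memset(p, 0, nmemb * size);
  return p;
}

int main() {
  // Same parameters as the test above: ((SIZE_MAX / 0x1000) + 1) * 0x1000 overflows.
  void *p = checked_calloc((SIZE_MAX / 0x1000) + 1, 0x1000);
  return p ? 1 : 0;
}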



