[llvm-commits] [compiler-rt] r169368 - in /compiler-rt/trunk/lib: asan/asan_interceptors.cc sanitizer_common/sanitizer_allocator.h sanitizer_common/sanitizer_allocator64.h sanitizer_common/tests/CMakeLists.txt sanitizer_common/tests/sanitizer_allocator64_test.cc sanitizer_common/tests/sanitizer_allocator64_testlib.cc sanitizer_common/tests/sanitizer_allocator_test.cc tsan/rtl/tsan_rtl.h
Kostya Serebryany
kcc at google.com
Wed Dec 5 02:09:16 PST 2012
Author: kcc
Date: Wed Dec 5 04:09:15 2012
New Revision: 169368
URL: http://llvm.org/viewvc/llvm-project?rev=169368&view=rev
Log:
[tsan] get rid of *allocator64* files, moving everything to *allocator* files. This will help with the 32-bit allocator implementation and testing.
Removed:
compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h
compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
Modified:
compiler-rt/trunk/lib/asan/asan_interceptors.cc
compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
compiler-rt/trunk/lib/sanitizer_common/tests/CMakeLists.txt
compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc
compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h
Modified: compiler-rt/trunk/lib/asan/asan_interceptors.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/asan/asan_interceptors.cc?rev=169368&r1=169367&r2=169368&view=diff
==============================================================================
--- compiler-rt/trunk/lib/asan/asan_interceptors.cc (original)
+++ compiler-rt/trunk/lib/asan/asan_interceptors.cc Wed Dec 5 04:09:15 2012
@@ -179,6 +179,8 @@
#if ASAN_INTERCEPT___CXA_THROW
INTERCEPTOR(void, __cxa_throw, void *a, void *b, void *c) {
+ Printf("__asan's __cxa_throw %p; REAL(__cxa_throw) %p PLAIN %p\n",
+ __interceptor___cxa_throw, REAL(__cxa_throw), __cxa_throw);
CHECK(REAL(__cxa_throw));
__asan_handle_no_return();
REAL(__cxa_throw)(a, b, c);
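A note on the interceptor machinery used above: INTERCEPTOR(...) defines the wrapper function that gets bound to the symbol, and REAL(...) is a pointer to the original implementation, so the wrapper can do tool-specific work and then forward the call. The sketch below illustrates the same wrap-and-forward idea with plain dlsym(RTLD_NEXT, ...); it is a hypothetical stand-in for the real macro machinery, intended only for LD_PRELOAD-style experiments, and every name in it is made up for the example.

// Build as a shared object and LD_PRELOAD it; link with -ldl.
// RTLD_NEXT needs _GNU_SOURCE on glibc (g++ defines it by default).
#include <dlfcn.h>
#include <cstdio>

typedef void (*cxa_throw_t)(void *thrown, void *tinfo, void (*dest)(void *));

extern "C" void __cxa_throw(void *thrown, void *tinfo, void (*dest)(void *)) {
  // Resolve the next definition of the symbol (libstdc++'s) exactly once.
  static cxa_throw_t real_cxa_throw =
      reinterpret_cast<cxa_throw_t>(dlsym(RTLD_NEXT, "__cxa_throw"));
  // A tool does its bookkeeping here; ASan calls __asan_handle_no_return()
  // because the unwinder is about to skip over poisoned stack frames.
  fprintf(stderr, "wrapped __cxa_throw; real at %p\n",
          reinterpret_cast<void *>(real_cxa_throw));
  real_cxa_throw(thrown, tinfo, dest);  // never returns
}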
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h?rev=169368&r1=169367&r2=169368&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h Wed Dec 5 04:09:15 2012
@@ -99,6 +99,208 @@
typedef IntrusiveList<AllocatorListNode> AllocatorFreeList;
+// SizeClassAllocator64 -- allocator for 64-bit address space.
+//
+// Space: a portion of address space of kSpaceSize bytes starting at
+// a fixed address (kSpaceBeg). Both constants are powers of two and
+// kSpaceBeg is kSpaceSize-aligned.
+//
+// Region: a part of Space dedicated to a single size class.
+// There are kNumClasses Regions of equal size.
+//
+// UserChunk: a piece of memory returned to user.
+// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
+//
+// A Region looks like this:
+// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
+template <const uptr kSpaceBeg, const uptr kSpaceSize,
+ const uptr kMetadataSize, class SizeClassMap>
+class SizeClassAllocator64 {
+ public:
+ void Init() {
+ CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
+ AllocBeg(), AllocSize())));
+ }
+
+ bool CanAllocate(uptr size, uptr alignment) {
+ return size <= SizeClassMap::kMaxSize &&
+ alignment <= SizeClassMap::kMaxSize;
+ }
+
+ void *Allocate(uptr size, uptr alignment) {
+ CHECK(CanAllocate(size, alignment));
+ return AllocateBySizeClass(SizeClassMap::ClassID(size));
+ }
+
+ void Deallocate(void *p) {
+ CHECK(PointerIsMine(p));
+ DeallocateBySizeClass(p, GetSizeClass(p));
+ }
+
+ // Allocate several chunks of the given class_id.
+ void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *region = GetRegionInfo(class_id);
+ SpinMutexLock l(&region->mutex);
+ if (region->free_list.empty()) {
+ PopulateFreeList(class_id, region);
+ }
+ CHECK(!region->free_list.empty());
+ uptr count = SizeClassMap::MaxCached(class_id);
+ if (region->free_list.size() <= count) {
+ free_list->append_front(&region->free_list);
+ } else {
+ for (uptr i = 0; i < count; i++) {
+ AllocatorListNode *node = region->free_list.front();
+ region->free_list.pop_front();
+ free_list->push_front(node);
+ }
+ }
+ CHECK(!free_list->empty());
+ }
+
+ // Swallow the entire free_list for the given class_id.
+ void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *region = GetRegionInfo(class_id);
+ SpinMutexLock l(&region->mutex);
+ region->free_list.append_front(free_list);
+ }
+
+ static bool PointerIsMine(void *p) {
+ return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
+ }
+
+ static uptr GetSizeClass(void *p) {
+ return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
+ }
+
+ static void *GetBlockBegin(void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = SizeClassMap::Size(class_id);
+ uptr chunk_idx = GetChunkIdx((uptr)p, size);
+ uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
+ uptr begin = reg_beg + chunk_idx * size;
+ return (void*)begin;
+ }
+
+ static uptr GetActuallyAllocatedSize(void *p) {
+ CHECK(PointerIsMine(p));
+ return SizeClassMap::Size(GetSizeClass(p));
+ }
+
+ uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
+
+ void *GetMetaData(void *p) {
+ uptr class_id = GetSizeClass(p);
+ uptr size = SizeClassMap::Size(class_id);
+ uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
+ return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
+ (1 + chunk_idx) * kMetadataSize);
+ }
+
+ uptr TotalMemoryUsed() {
+ uptr res = 0;
+ for (uptr i = 0; i < kNumClasses; i++)
+ res += GetRegionInfo(i)->allocated_user;
+ return res;
+ }
+
+ // Test-only.
+ void TestOnlyUnmap() {
+ UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
+ }
+
+ static uptr AllocBeg() { return kSpaceBeg; }
+ static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
+
+ typedef SizeClassMap SizeClassMapT;
+ static const uptr kNumClasses = SizeClassMap::kNumClasses; // 2^k <= 256
+
+ private:
+ static const uptr kRegionSize = kSpaceSize / kNumClasses;
+ COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
+ COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
+ // kRegionSize must be >= 2^32.
+ COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
+ // Populate the free list with at most this number of bytes at once
+ // or with one element if its size is greater.
+ static const uptr kPopulateSize = 1 << 18;
+
+ struct RegionInfo {
+ SpinMutex mutex;
+ AllocatorFreeList free_list;
+ uptr allocated_user; // Bytes allocated for user memory.
+ uptr allocated_meta; // Bytes allocated for metadata.
+ char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
+ };
+ COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);
+
+ static uptr AdditionalSize() {
+ uptr PageSize = GetPageSizeCached();
+ uptr res = Max(sizeof(RegionInfo) * kNumClasses, PageSize);
+ CHECK_EQ(res % PageSize, 0);
+ return res;
+ }
+
+ RegionInfo *GetRegionInfo(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
+ return &regions[class_id];
+ }
+
+ static uptr GetChunkIdx(uptr chunk, uptr size) {
+ u32 offset = chunk % kRegionSize;
+ // Here we divide by a non-constant. This is costly.
+ // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
+ // We save 2x by using 32-bit div, but may need to use a 256-way switch.
+ return offset / (u32)size;
+ }
+
+ void PopulateFreeList(uptr class_id, RegionInfo *region) {
+ uptr size = SizeClassMap::Size(class_id);
+ uptr beg_idx = region->allocated_user;
+ uptr end_idx = beg_idx + kPopulateSize;
+ region->free_list.clear();
+ uptr region_beg = kSpaceBeg + kRegionSize * class_id;
+ uptr idx = beg_idx;
+ uptr i = 0;
+ do { // do-while loop because we need to put at least one item.
+ uptr p = region_beg + idx;
+ region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+ idx += size;
+ i++;
+ } while (idx < end_idx);
+ region->allocated_user += idx - beg_idx;
+ region->allocated_meta += i * kMetadataSize;
+ if (region->allocated_user + region->allocated_meta > kRegionSize) {
+ Printf("Out of memory. Dying.\n");
+ Printf("The process has exhausted %zuMB for size class %zu.\n",
+ kRegionSize / 1024 / 1024, size);
+ Die();
+ }
+ }
+
+ void *AllocateBySizeClass(uptr class_id) {
+ CHECK_LT(class_id, kNumClasses);
+ RegionInfo *region = GetRegionInfo(class_id);
+ SpinMutexLock l(&region->mutex);
+ if (region->free_list.empty()) {
+ PopulateFreeList(class_id, region);
+ }
+ CHECK(!region->free_list.empty());
+ AllocatorListNode *node = region->free_list.front();
+ region->free_list.pop_front();
+ return reinterpret_cast<void*>(node);
+ }
+
+ void DeallocateBySizeClass(void *p, uptr class_id) {
+ RegionInfo *region = GetRegionInfo(class_id);
+ SpinMutexLock l(&region->mutex);
+ region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
+ }
+};
+
// Objects of this type should be used as local caches for SizeClassAllocator64.
// Since the typical use of this class is to have one object per thread in TLS,
// it has to be POD.
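The comment block above is enough to derive every lookup as pure address arithmetic, which is this allocator's selling point: no memory access is needed to find a pointer's size class or its metadata. Below is a hypothetical, self-contained sketch of the three computations, using the same constants as the unit tests; the names are local to the example, and the pairing "class 7 serves 128-byte chunks" is made up for the walkthrough.

#include <cassert>
#include <cstdint>

typedef uint64_t uptr;

static const uptr kSpaceBeg     = 0x700000000000ULL;         // Space start
static const uptr kSpaceSize    = 0x010000000000ULL;         // 1T; kSpaceBeg
                                                             // is kSpaceSize-aligned
static const uptr kNumClasses   = 256;
static const uptr kRegionSize   = kSpaceSize / kNumClasses;  // 2^32
static const uptr kMetadataSize = 16;

// The size class is encoded in the address itself.
static uptr GetSizeClass(uptr p) {
  return (p / kRegionSize) % kNumClasses;
}

// Chunk index within the region. The offset fits in 32 bits because
// kRegionSize is 2^32; that is what makes the cheaper u32 division legal.
static uptr GetChunkIdx(uptr p, uptr size) {
  uint32_t offset = static_cast<uint32_t>(p % kRegionSize);
  return offset / static_cast<uint32_t>(size);
}

// MetaChunks are laid out back-to-front from the end of the region.
static uptr GetMetaData(uptr p, uptr size) {
  return kSpaceBeg + kRegionSize * (GetSizeClass(p) + 1) -
         (1 + GetChunkIdx(p, size)) * kMetadataSize;
}

int main() {
  const uptr size = 128;  // pretend class 7 serves 128-byte chunks
  const uptr p = kSpaceBeg + 7 * kRegionSize + 2 * size;  // 3rd chunk, region 7
  assert(GetSizeClass(p) == 7);
  assert(GetChunkIdx(p, size) == 2);
  assert(GetMetaData(p, size) ==
         kSpaceBeg + 8 * kRegionSize - 3 * kMetadataSize);
  return 0;
}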
Removed: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h?rev=169367&view=auto
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator64.h (removed)
@@ -1,229 +0,0 @@
-//===-- sanitizer_allocator64.h ---------------------------------*- C++ -*-===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// Specialized allocator which works only in 64-bit address space.
-// It is used by ThreadSanitizer, MemorySanitizer and possibly other tools.
-// The main feature of this allocator is that the header is located far away
-// from the user memory region, so that the tool does not use extra shadow
-// for the header.
-// Another important feature is that the size class of a pointer is computed
-// without any memory accesses by simply looking at the address.
-//
-//===----------------------------------------------------------------------===//
-#ifndef SANITIZER_ALLOCATOR64_H
-#define SANITIZER_ALLOCATOR64_H
-
-#include "sanitizer_allocator.h"
-
-namespace __sanitizer {
-
-// SizeClassAllocator64 -- allocator for 64-bit address space.
-//
-// Space: a portion of address space of kSpaceSize bytes starting at
-// a fixed address (kSpaceBeg). Both constants are powers of two and
-// kSpaceBeg is kSpaceSize-aligned.
-//
-// Region: a part of Space dedicated to a single size class.
-// There are kNumClasses Regions of equal size.
-//
-// UserChunk: a piece of memory returned to user.
-// MetaChunk: kMetadataSize bytes of metadata associated with a UserChunk.
-//
-// A Region looks like this:
-// UserChunk1 ... UserChunkN <gap> MetaChunkN ... MetaChunk1
-template <const uptr kSpaceBeg, const uptr kSpaceSize,
- const uptr kMetadataSize, class SizeClassMap>
-class SizeClassAllocator64 {
- public:
- void Init() {
- CHECK_EQ(AllocBeg(), reinterpret_cast<uptr>(MmapFixedNoReserve(
- AllocBeg(), AllocSize())));
- }
-
- bool CanAllocate(uptr size, uptr alignment) {
- return size <= SizeClassMap::kMaxSize &&
- alignment <= SizeClassMap::kMaxSize;
- }
-
- void *Allocate(uptr size, uptr alignment) {
- CHECK(CanAllocate(size, alignment));
- return AllocateBySizeClass(SizeClassMap::ClassID(size));
- }
-
- void Deallocate(void *p) {
- CHECK(PointerIsMine(p));
- DeallocateBySizeClass(p, GetSizeClass(p));
- }
-
- // Allocate several chunks of the given class_id.
- void BulkAllocate(uptr class_id, AllocatorFreeList *free_list) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *region = GetRegionInfo(class_id);
- SpinMutexLock l(&region->mutex);
- if (region->free_list.empty()) {
- PopulateFreeList(class_id, region);
- }
- CHECK(!region->free_list.empty());
- uptr count = SizeClassMap::MaxCached(class_id);
- if (region->free_list.size() <= count) {
- free_list->append_front(&region->free_list);
- } else {
- for (uptr i = 0; i < count; i++) {
- AllocatorListNode *node = region->free_list.front();
- region->free_list.pop_front();
- free_list->push_front(node);
- }
- }
- CHECK(!free_list->empty());
- }
-
- // Swallow the entire free_list for the given class_id.
- void BulkDeallocate(uptr class_id, AllocatorFreeList *free_list) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *region = GetRegionInfo(class_id);
- SpinMutexLock l(&region->mutex);
- region->free_list.append_front(free_list);
- }
-
- static bool PointerIsMine(void *p) {
- return reinterpret_cast<uptr>(p) / kSpaceSize == kSpaceBeg / kSpaceSize;
- }
-
- static uptr GetSizeClass(void *p) {
- return (reinterpret_cast<uptr>(p) / kRegionSize) % kNumClasses;
- }
-
- static void *GetBlockBegin(void *p) {
- uptr class_id = GetSizeClass(p);
- uptr size = SizeClassMap::Size(class_id);
- uptr chunk_idx = GetChunkIdx((uptr)p, size);
- uptr reg_beg = (uptr)p & ~(kRegionSize - 1);
- uptr begin = reg_beg + chunk_idx * size;
- return (void*)begin;
- }
-
- static uptr GetActuallyAllocatedSize(void *p) {
- CHECK(PointerIsMine(p));
- return SizeClassMap::Size(GetSizeClass(p));
- }
-
- uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }
-
- void *GetMetaData(void *p) {
- uptr class_id = GetSizeClass(p);
- uptr size = SizeClassMap::Size(class_id);
- uptr chunk_idx = GetChunkIdx(reinterpret_cast<uptr>(p), size);
- return reinterpret_cast<void*>(kSpaceBeg + (kRegionSize * (class_id + 1)) -
- (1 + chunk_idx) * kMetadataSize);
- }
-
- uptr TotalMemoryUsed() {
- uptr res = 0;
- for (uptr i = 0; i < kNumClasses; i++)
- res += GetRegionInfo(i)->allocated_user;
- return res;
- }
-
- // Test-only.
- void TestOnlyUnmap() {
- UnmapOrDie(reinterpret_cast<void*>(AllocBeg()), AllocSize());
- }
-
- static uptr AllocBeg() { return kSpaceBeg; }
- static uptr AllocSize() { return kSpaceSize + AdditionalSize(); }
-
- typedef SizeClassMap SizeClassMapT;
- static const uptr kNumClasses = SizeClassMap::kNumClasses; // 2^k <= 256
-
- private:
- static const uptr kRegionSize = kSpaceSize / kNumClasses;
- COMPILER_CHECK(kSpaceBeg % kSpaceSize == 0);
- COMPILER_CHECK(kNumClasses <= SizeClassMap::kNumClasses);
- // kRegionSize must be >= 2^32.
- COMPILER_CHECK((kRegionSize) >= (1ULL << (SANITIZER_WORDSIZE / 2)));
- // Populate the free list with at most this number of bytes at once
- // or with one element if its size is greater.
- static const uptr kPopulateSize = 1 << 18;
-
- struct RegionInfo {
- SpinMutex mutex;
- AllocatorFreeList free_list;
- uptr allocated_user; // Bytes allocated for user memory.
- uptr allocated_meta; // Bytes allocated for metadata.
- char padding[kCacheLineSize - 3 * sizeof(uptr) - sizeof(AllocatorFreeList)];
- };
- COMPILER_CHECK(sizeof(RegionInfo) == kCacheLineSize);
-
- static uptr AdditionalSize() {
- uptr PageSize = GetPageSizeCached();
- uptr res = Max(sizeof(RegionInfo) * kNumClasses, PageSize);
- CHECK_EQ(res % PageSize, 0);
- return res;
- }
-
- RegionInfo *GetRegionInfo(uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *regions = reinterpret_cast<RegionInfo*>(kSpaceBeg + kSpaceSize);
- return &regions[class_id];
- }
-
- static uptr GetChunkIdx(uptr chunk, uptr size) {
- u32 offset = chunk % kRegionSize;
- // Here we divide by a non-constant. This is costly.
- // We require that kRegionSize is at least 2^32 so that offset is 32-bit.
- // We save 2x by using 32-bit div, but may need to use a 256-way switch.
- return offset / (u32)size;
- }
-
- void PopulateFreeList(uptr class_id, RegionInfo *region) {
- uptr size = SizeClassMap::Size(class_id);
- uptr beg_idx = region->allocated_user;
- uptr end_idx = beg_idx + kPopulateSize;
- region->free_list.clear();
- uptr region_beg = kSpaceBeg + kRegionSize * class_id;
- uptr idx = beg_idx;
- uptr i = 0;
- do { // do-while loop because we need to put at least one item.
- uptr p = region_beg + idx;
- region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
- idx += size;
- i++;
- } while (idx < end_idx);
- region->allocated_user += idx - beg_idx;
- region->allocated_meta += i * kMetadataSize;
- if (region->allocated_user + region->allocated_meta > kRegionSize) {
- Printf("Out of memory. Dying.\n");
- Printf("The process has exhausted %zuMB for size class %zu.\n",
- kRegionSize / 1024 / 1024, size);
- Die();
- }
- }
-
- void *AllocateBySizeClass(uptr class_id) {
- CHECK_LT(class_id, kNumClasses);
- RegionInfo *region = GetRegionInfo(class_id);
- SpinMutexLock l(&region->mutex);
- if (region->free_list.empty()) {
- PopulateFreeList(class_id, region);
- }
- CHECK(!region->free_list.empty());
- AllocatorListNode *node = region->free_list.front();
- region->free_list.pop_front();
- return reinterpret_cast<void*>(node);
- }
-
- void DeallocateBySizeClass(void *p, uptr class_id) {
- RegionInfo *region = GetRegionInfo(class_id);
- SpinMutexLock l(&region->mutex);
- region->free_list.push_front(reinterpret_cast<AllocatorListNode*>(p));
- }
-};
-
-} // namespace __sanitizer
-
-#endif // SANITIZER_ALLOCATOR64_H
Modified: compiler-rt/trunk/lib/sanitizer_common/tests/CMakeLists.txt
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/CMakeLists.txt?rev=169368&r1=169367&r2=169368&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/CMakeLists.txt (original)
+++ compiler-rt/trunk/lib/sanitizer_common/tests/CMakeLists.txt Wed Dec 5 04:09:15 2012
@@ -8,9 +8,6 @@
sanitizer_stackdepot_test.cc
sanitizer_test_main.cc
)
-if(CMAKE_SIZEOF_VOID_P EQUAL 8 AND NOT LLVM_BUILD_32_BITS)
- list(APPEND SANITIZER_UNITTESTS sanitizer_allocator64_test.cc)
-endif()
include_directories(..)
include_directories(../..)
Removed: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc?rev=169367&view=auto
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_test.cc (removed)
@@ -1,310 +0,0 @@
-//===-- sanitizer_allocator64_test.cc -------------------------------------===//
-//
-// The LLVM Compiler Infrastructure
-//
-// This file is distributed under the University of Illinois Open Source
-// License. See LICENSE.TXT for details.
-//
-//===----------------------------------------------------------------------===//
-// Tests for sanitizer_allocator64.h.
-//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_allocator64.h"
-#include "gtest/gtest.h"
-
-#include <algorithm>
-#include <vector>
-
-static const uptr kAllocatorSpace = 0x700000000000ULL;
-static const uptr kAllocatorSize = 0x010000000000ULL; // 1T.
-
-typedef SizeClassAllocator64<
- kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;
-
-typedef SizeClassAllocator64<
- kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
-
-template <class SizeClassMap>
-void TestSizeClassMap() {
- typedef SizeClassMap SCMap;
-#if 0
- for (uptr i = 0; i < SCMap::kNumClasses; i++) {
- printf("c%ld => %ld (%lx) cached=%ld(%ld)\n",
- i, SCMap::Size(i), SCMap::Size(i), SCMap::MaxCached(i) * SCMap::Size(i),
- SCMap::MaxCached(i));
- }
-#endif
- for (uptr c = 0; c < SCMap::kNumClasses; c++) {
- uptr s = SCMap::Size(c);
- CHECK_EQ(SCMap::ClassID(s), c);
- if (c != SCMap::kNumClasses - 1)
- CHECK_EQ(SCMap::ClassID(s + 1), c + 1);
- CHECK_EQ(SCMap::ClassID(s - 1), c);
- if (c)
- CHECK_GT(SCMap::Size(c), SCMap::Size(c-1));
- }
- CHECK_EQ(SCMap::ClassID(SCMap::kMaxSize + 1), 0);
-
- for (uptr s = 1; s <= SCMap::kMaxSize; s++) {
- uptr c = SCMap::ClassID(s);
- CHECK_LT(c, SCMap::kNumClasses);
- CHECK_GE(SCMap::Size(c), s);
- if (c > 0)
- CHECK_LT(SCMap::Size(c-1), s);
- }
-}
-
-TEST(SanitizerCommon, DefaultSizeClassMap) {
- TestSizeClassMap<DefaultSizeClassMap>();
-}
-
-TEST(SanitizerCommon, CompactSizeClassMap) {
- TestSizeClassMap<CompactSizeClassMap>();
-}
-
-template <class Allocator>
-void TestSizeClassAllocator() {
- Allocator a;
- a.Init();
-
- static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
- 50000, 60000, 100000, 300000, 500000, 1000000, 2000000};
-
- std::vector<void *> allocated;
-
- uptr last_total_allocated = 0;
- for (int i = 0; i < 5; i++) {
- // Allocate a bunch of chunks.
- for (uptr s = 0; s < sizeof(sizes) /sizeof(sizes[0]); s++) {
- uptr size = sizes[s];
- if (!a.CanAllocate(size, 1)) continue;
- // printf("s = %ld\n", size);
- uptr n_iter = std::max((uptr)2, 1000000 / size);
- for (uptr i = 0; i < n_iter; i++) {
- void *x = a.Allocate(size, 1);
- allocated.push_back(x);
- CHECK(a.PointerIsMine(x));
- CHECK_GE(a.GetActuallyAllocatedSize(x), size);
- uptr class_id = a.GetSizeClass(x);
- CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
- uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
- metadata[0] = reinterpret_cast<uptr>(x) + 1;
- metadata[1] = 0xABCD;
- }
- }
- // Deallocate all.
- for (uptr i = 0; i < allocated.size(); i++) {
- void *x = allocated[i];
- uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
- CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
- CHECK_EQ(metadata[1], 0xABCD);
- a.Deallocate(x);
- }
- allocated.clear();
- uptr total_allocated = a.TotalMemoryUsed();
- if (last_total_allocated == 0)
- last_total_allocated = total_allocated;
- CHECK_EQ(last_total_allocated, total_allocated);
- }
-
- a.TestOnlyUnmap();
-}
-
-TEST(SanitizerCommon, SizeClassAllocator64) {
- TestSizeClassAllocator<Allocator64>();
-}
-
-TEST(SanitizerCommon, SizeClassAllocator64Compact) {
- TestSizeClassAllocator<Allocator64Compact>();
-}
-
-template <class Allocator>
-void SizeClassAllocator64MetadataStress() {
- Allocator a;
- a.Init();
- static volatile void *sink;
-
- const uptr kNumAllocs = 10000;
- void *allocated[kNumAllocs];
- for (uptr i = 0; i < kNumAllocs; i++) {
- uptr size = (i % 4096) + 1;
- void *x = a.Allocate(size, 1);
- allocated[i] = x;
- }
- // Get Metadata kNumAllocs^2 times.
- for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
- sink = a.GetMetaData(allocated[i % kNumAllocs]);
- }
- for (uptr i = 0; i < kNumAllocs; i++) {
- a.Deallocate(allocated[i]);
- }
-
- a.TestOnlyUnmap();
- (void)sink;
-}
-
-TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
- SizeClassAllocator64MetadataStress<Allocator64>();
-}
-
-TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
- SizeClassAllocator64MetadataStress<Allocator64Compact>();
-}
-
-template<class Allocator>
-void FailInAssertionOnOOM() {
- Allocator a;
- a.Init();
- const uptr size = 1 << 20;
- for (int i = 0; i < 1000000; i++) {
- a.Allocate(size, 1);
- }
-
- a.TestOnlyUnmap();
-}
-
-TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
- EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
-}
-
-TEST(SanitizerCommon, LargeMmapAllocator) {
- LargeMmapAllocator a;
- a.Init();
-
- static const int kNumAllocs = 100;
- void *allocated[kNumAllocs];
- static const uptr size = 1000;
- // Allocate some.
- for (int i = 0; i < kNumAllocs; i++) {
- allocated[i] = a.Allocate(size, 1);
- }
- // Deallocate all.
- CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
- for (int i = 0; i < kNumAllocs; i++) {
- void *p = allocated[i];
- CHECK(a.PointerIsMine(p));
- a.Deallocate(p);
- }
- // Check that none are left.
- CHECK_EQ(a.TotalMemoryUsed(), 0);
-
- // Allocate some more, also add metadata.
- for (int i = 0; i < kNumAllocs; i++) {
- void *x = a.Allocate(size, 1);
- CHECK_GE(a.GetActuallyAllocatedSize(x), size);
- uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
- *meta = i;
- allocated[i] = x;
- }
- CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
- // Deallocate all in reverse order.
- for (int i = 0; i < kNumAllocs; i++) {
- int idx = kNumAllocs - i - 1;
- void *p = allocated[idx];
- uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
- CHECK_EQ(*meta, idx);
- CHECK(a.PointerIsMine(p));
- a.Deallocate(p);
- }
- CHECK_EQ(a.TotalMemoryUsed(), 0);
-
- for (uptr alignment = 8; alignment <= (1<<28); alignment *= 2) {
- for (int i = 0; i < kNumAllocs; i++) {
- uptr size = ((i % 10) + 1) * 4096;
- allocated[i] = a.Allocate(size, alignment);
- CHECK_EQ(0, (uptr)allocated[i] % alignment);
- char *p = (char*)allocated[i];
- p[0] = p[size - 1] = 0;
- }
- for (int i = 0; i < kNumAllocs; i++) {
- a.Deallocate(allocated[i]);
- }
- }
-}
-
-template
-<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
-void TestCombinedAllocator() {
- CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator> a;
- a.Init();
-
- AllocatorCache cache;
- cache.Init();
-
- EXPECT_EQ(a.Allocate(&cache, -1, 1), (void*)0);
- EXPECT_EQ(a.Allocate(&cache, -1, 1024), (void*)0);
- EXPECT_EQ(a.Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
- EXPECT_EQ(a.Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
- EXPECT_EQ(a.Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
-
- const uptr kNumAllocs = 100000;
- const uptr kNumIter = 10;
- for (uptr iter = 0; iter < kNumIter; iter++) {
- std::vector<void*> allocated;
- for (uptr i = 0; i < kNumAllocs; i++) {
- uptr size = (i % (1 << 14)) + 1;
- if ((i % 1024) == 0)
- size = 1 << (10 + (i % 14));
- void *x = a.Allocate(&cache, size, 1);
- uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
- CHECK_EQ(*meta, 0);
- *meta = size;
- allocated.push_back(x);
- }
-
- random_shuffle(allocated.begin(), allocated.end());
-
- for (uptr i = 0; i < kNumAllocs; i++) {
- void *x = allocated[i];
- uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
- CHECK_NE(*meta, 0);
- CHECK(a.PointerIsMine(x));
- *meta = 0;
- a.Deallocate(&cache, x);
- }
- allocated.clear();
- a.SwallowCache(&cache);
- }
- a.TestOnlyUnmap();
-}
-
-TEST(SanitizerCommon, CombinedAllocator) {
- TestCombinedAllocator<Allocator64,
- LargeMmapAllocator,
- SizeClassAllocatorLocalCache<Allocator64> > ();
-}
-
-template <class AllocatorCache>
-void TestSizeClassAllocatorLocalCache() {
- static THREADLOCAL AllocatorCache static_allocator_cache;
- static_allocator_cache.Init();
- AllocatorCache cache;
- typename AllocatorCache::Allocator a;
-
- a.Init();
- cache.Init();
-
- const uptr kNumAllocs = 10000;
- const int kNumIter = 100;
- uptr saved_total = 0;
- for (int i = 0; i < kNumIter; i++) {
- void *allocated[kNumAllocs];
- for (uptr i = 0; i < kNumAllocs; i++) {
- allocated[i] = cache.Allocate(&a, 0);
- }
- for (uptr i = 0; i < kNumAllocs; i++) {
- cache.Deallocate(&a, 0, allocated[i]);
- }
- cache.Drain(&a);
- uptr total_allocated = a.TotalMemoryUsed();
- if (saved_total)
- CHECK_EQ(saved_total, total_allocated);
- saved_total = total_allocated;
- }
-
- a.TestOnlyUnmap();
-}
-
-TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
- TestSizeClassAllocatorLocalCache<
- SizeClassAllocatorLocalCache<Allocator64> >();
-}
Modified: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc?rev=169368&r1=169367&r2=169368&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator64_testlib.cc Wed Dec 5 04:09:15 2012
@@ -10,7 +10,7 @@
// The primary purpose of this file is an end-to-end integration test
// for CombinedAllocator.
//===----------------------------------------------------------------------===//
-#include "sanitizer_common/sanitizer_allocator64.h"
+#include "sanitizer_common/sanitizer_allocator.h"
#include <stddef.h>
#include <stdio.h>
#include <unistd.h>
Modified: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc?rev=169368&r1=169367&r2=169368&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc Wed Dec 5 04:09:15 2012
@@ -8,13 +8,328 @@
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
+// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
+#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_common.h"
+
#include "gtest/gtest.h"
+
#include <stdlib.h>
+#include <algorithm>
+#include <vector>
+
+#if SANITIZER_WORDSIZE == 64
+static const uptr kAllocatorSpace = 0x700000000000ULL;
+static const uptr kAllocatorSize = 0x010000000000ULL; // 1T.
+
+typedef SizeClassAllocator64<
+ kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;
+
+typedef SizeClassAllocator64<
+ kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
+#endif
+
+template <class SizeClassMap>
+void TestSizeClassMap() {
+ typedef SizeClassMap SCMap;
+#if 0
+ for (uptr i = 0; i < SCMap::kNumClasses; i++) {
+ printf("c%ld => %ld (%lx) cached=%ld(%ld)\n",
+ i, SCMap::Size(i), SCMap::Size(i), SCMap::MaxCached(i) * SCMap::Size(i),
+ SCMap::MaxCached(i));
+ }
+#endif
+ for (uptr c = 0; c < SCMap::kNumClasses; c++) {
+ uptr s = SCMap::Size(c);
+ CHECK_EQ(SCMap::ClassID(s), c);
+ if (c != SCMap::kNumClasses - 1)
+ CHECK_EQ(SCMap::ClassID(s + 1), c + 1);
+ CHECK_EQ(SCMap::ClassID(s - 1), c);
+ if (c)
+ CHECK_GT(SCMap::Size(c), SCMap::Size(c-1));
+ }
+ CHECK_EQ(SCMap::ClassID(SCMap::kMaxSize + 1), 0);
+
+ for (uptr s = 1; s <= SCMap::kMaxSize; s++) {
+ uptr c = SCMap::ClassID(s);
+ CHECK_LT(c, SCMap::kNumClasses);
+ CHECK_GE(SCMap::Size(c), s);
+ if (c > 0)
+ CHECK_LT(SCMap::Size(c-1), s);
+ }
+}
+
+TEST(SanitizerCommon, DefaultSizeClassMap) {
+ TestSizeClassMap<DefaultSizeClassMap>();
+}
+
+TEST(SanitizerCommon, CompactSizeClassMap) {
+ TestSizeClassMap<CompactSizeClassMap>();
+}
+
+template <class Allocator>
+void TestSizeClassAllocator() {
+ Allocator a;
+ a.Init();
+
+ static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
+ 50000, 60000, 100000, 300000, 500000, 1000000, 2000000};
+
+ std::vector<void *> allocated;
+
+ uptr last_total_allocated = 0;
+ for (int i = 0; i < 5; i++) {
+ // Allocate a bunch of chunks.
+ for (uptr s = 0; s < sizeof(sizes) /sizeof(sizes[0]); s++) {
+ uptr size = sizes[s];
+ if (!a.CanAllocate(size, 1)) continue;
+ // printf("s = %ld\n", size);
+ uptr n_iter = std::max((uptr)2, 1000000 / size);
+ for (uptr i = 0; i < n_iter; i++) {
+ void *x = a.Allocate(size, 1);
+ allocated.push_back(x);
+ CHECK(a.PointerIsMine(x));
+ CHECK_GE(a.GetActuallyAllocatedSize(x), size);
+ uptr class_id = a.GetSizeClass(x);
+ CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
+ uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
+ metadata[0] = reinterpret_cast<uptr>(x) + 1;
+ metadata[1] = 0xABCD;
+ }
+ }
+ // Deallocate all.
+ for (uptr i = 0; i < allocated.size(); i++) {
+ void *x = allocated[i];
+ uptr *metadata = reinterpret_cast<uptr*>(a.GetMetaData(x));
+ CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
+ CHECK_EQ(metadata[1], 0xABCD);
+ a.Deallocate(x);
+ }
+ allocated.clear();
+ uptr total_allocated = a.TotalMemoryUsed();
+ if (last_total_allocated == 0)
+ last_total_allocated = total_allocated;
+ CHECK_EQ(last_total_allocated, total_allocated);
+ }
+
+ a.TestOnlyUnmap();
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64) {
+ TestSizeClassAllocator<Allocator64>();
+}
+
+TEST(SanitizerCommon, SizeClassAllocator64Compact) {
+ TestSizeClassAllocator<Allocator64Compact>();
+}
+#endif
+
+template <class Allocator>
+void SizeClassAllocator64MetadataStress() {
+ Allocator a;
+ a.Init();
+ static volatile void *sink;
+
+ const uptr kNumAllocs = 10000;
+ void *allocated[kNumAllocs];
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ uptr size = (i % 4096) + 1;
+ void *x = a.Allocate(size, 1);
+ allocated[i] = x;
+ }
+ // Get Metadata kNumAllocs^2 times.
+ for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
+ sink = a.GetMetaData(allocated[i % kNumAllocs]);
+ }
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ a.Deallocate(allocated[i]);
+ }
+
+ a.TestOnlyUnmap();
+ (void)sink;
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
+ SizeClassAllocator64MetadataStress<Allocator64>();
+}
-namespace __sanitizer {
+TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
+ SizeClassAllocator64MetadataStress<Allocator64Compact>();
+}
+#endif
+
+template<class Allocator>
+void FailInAssertionOnOOM() {
+ Allocator a;
+ a.Init();
+ const uptr size = 1 << 20;
+ for (int i = 0; i < 1000000; i++) {
+ a.Allocate(size, 1);
+ }
+
+ a.TestOnlyUnmap();
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
+ EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
+}
+#endif
+
+TEST(SanitizerCommon, LargeMmapAllocator) {
+ fprintf(stderr, "xxxx %ld\n", 0L);
+ LargeMmapAllocator a;
+ a.Init();
+
+ static const int kNumAllocs = 100;
+ void *allocated[kNumAllocs];
+ static const uptr size = 1000;
+ // Allocate some.
+ for (int i = 0; i < kNumAllocs; i++) {
+ fprintf(stderr, "zzz0 %ld\n", size);
+ allocated[i] = a.Allocate(size, 1);
+ }
+ // Deallocate all.
+ CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
+ for (int i = 0; i < kNumAllocs; i++) {
+ void *p = allocated[i];
+ CHECK(a.PointerIsMine(p));
+ a.Deallocate(p);
+ }
+ // Check that none are left.
+ CHECK_EQ(a.TotalMemoryUsed(), 0);
+
+ // Allocate some more, also add metadata.
+ for (int i = 0; i < kNumAllocs; i++) {
+ fprintf(stderr, "zzz1 %ld\n", size);
+ void *x = a.Allocate(size, 1);
+ CHECK_GE(a.GetActuallyAllocatedSize(x), size);
+ uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
+ *meta = i;
+ allocated[i] = x;
+ }
+ CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
+ // Deallocate all in reverse order.
+ for (int i = 0; i < kNumAllocs; i++) {
+ int idx = kNumAllocs - i - 1;
+ void *p = allocated[idx];
+ uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
+ CHECK_EQ(*meta, idx);
+ CHECK(a.PointerIsMine(p));
+ a.Deallocate(p);
+ }
+ CHECK_EQ(a.TotalMemoryUsed(), 0);
+ uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
+ for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
+ for (int i = 0; i < kNumAllocs; i++) {
+ uptr size = ((i % 10) + 1) * 4096;
+ fprintf(stderr, "zzz1 %ld %ld\n", size, alignment);
+ allocated[i] = a.Allocate(size, alignment);
+ CHECK_EQ(0, (uptr)allocated[i] % alignment);
+ char *p = (char*)allocated[i];
+ p[0] = p[size - 1] = 0;
+ }
+ for (int i = 0; i < kNumAllocs; i++) {
+ a.Deallocate(allocated[i]);
+ }
+ }
+}
+
+template
+<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
+void TestCombinedAllocator() {
+ CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator> a;
+ a.Init();
+
+ AllocatorCache cache;
+ cache.Init();
+
+ EXPECT_EQ(a.Allocate(&cache, -1, 1), (void*)0);
+ EXPECT_EQ(a.Allocate(&cache, -1, 1024), (void*)0);
+ EXPECT_EQ(a.Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
+ EXPECT_EQ(a.Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
+ EXPECT_EQ(a.Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);
+
+ const uptr kNumAllocs = 100000;
+ const uptr kNumIter = 10;
+ for (uptr iter = 0; iter < kNumIter; iter++) {
+ std::vector<void*> allocated;
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ uptr size = (i % (1 << 14)) + 1;
+ if ((i % 1024) == 0)
+ size = 1 << (10 + (i % 14));
+ void *x = a.Allocate(&cache, size, 1);
+ uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
+ CHECK_EQ(*meta, 0);
+ *meta = size;
+ allocated.push_back(x);
+ }
+
+ random_shuffle(allocated.begin(), allocated.end());
+
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ void *x = allocated[i];
+ uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
+ CHECK_NE(*meta, 0);
+ CHECK(a.PointerIsMine(x));
+ *meta = 0;
+ a.Deallocate(&cache, x);
+ }
+ allocated.clear();
+ a.SwallowCache(&cache);
+ }
+ a.TestOnlyUnmap();
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, CombinedAllocator) {
+ TestCombinedAllocator<Allocator64,
+ LargeMmapAllocator,
+ SizeClassAllocatorLocalCache<Allocator64> > ();
+}
+#endif
+
+template <class AllocatorCache>
+void TestSizeClassAllocatorLocalCache() {
+ static THREADLOCAL AllocatorCache static_allocator_cache;
+ static_allocator_cache.Init();
+ AllocatorCache cache;
+ typename AllocatorCache::Allocator a;
+
+ a.Init();
+ cache.Init();
+
+ const uptr kNumAllocs = 10000;
+ const int kNumIter = 100;
+ uptr saved_total = 0;
+ for (int i = 0; i < kNumIter; i++) {
+ void *allocated[kNumAllocs];
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ allocated[i] = cache.Allocate(&a, 0);
+ }
+ for (uptr i = 0; i < kNumAllocs; i++) {
+ cache.Deallocate(&a, 0, allocated[i]);
+ }
+ cache.Drain(&a);
+ uptr total_allocated = a.TotalMemoryUsed();
+ if (saved_total)
+ CHECK_EQ(saved_total, total_allocated);
+ saved_total = total_allocated;
+ }
+
+ a.TestOnlyUnmap();
+}
+
+#if SANITIZER_WORDSIZE == 64
+TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
+ TestSizeClassAllocatorLocalCache<
+ SizeClassAllocatorLocalCache<Allocator64> >();
+}
+#endif
TEST(Allocator, Basic) {
char *p = (char*)InternalAlloc(10);
@@ -54,5 +369,3 @@
EXPECT_EQ('c', char_buf[i]);
}
}
-
-} // namespace __sanitizer
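The TestSizeClassAllocatorLocalCache test above shows the intended fast path: SizeClassAllocatorLocalCache is a POD object meant to live in TLS, one per thread, refilling from and draining back to the shared allocator in bulk (BulkAllocate / BulkDeallocate) so the region mutex is taken rarely. A condensed usage sketch, assuming the cache API exactly as the tests exercise it (Allocate and Deallocate keyed by class_id, Drain to flush) and the sanitizer_common headers on the include path:

#include "sanitizer_common/sanitizer_allocator.h"

using namespace __sanitizer;

typedef SizeClassAllocator64<0x700000000000ULL, 0x010000000000ULL, 16,
                             DefaultSizeClassMap> Primary;

// One cache per thread; it must stay POD because it lives in TLS.
static THREADLOCAL SizeClassAllocatorLocalCache<Primary> cache;

void Example(Primary *a) {
  cache.Init();
  uptr class_id = a->ClassID(100);        // size class that fits 100 bytes
  void *p = cache.Allocate(a, class_id);  // served locally; refills in bulk
  cache.Deallocate(a, class_id, p);       // goes back to the cache, not the
                                          // region free list
  cache.Drain(a);                         // hand everything back in bulk
}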
Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h?rev=169368&r1=169367&r2=169368&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_rtl.h Wed Dec 5 04:09:15 2012
@@ -27,7 +27,7 @@
#define TSAN_RTL_H
#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_allocator64.h"
+#include "sanitizer_common/sanitizer_allocator.h"
#include "tsan_clock.h"
#include "tsan_defs.h"
#include "tsan_flags.h"