[compiler-rt] r281674 - [asan] Reenable 64-bit allocator on android/aarch64.

Evgeniy Stepanov via llvm-commits <llvm-commits at lists.llvm.org>
Thu Sep 15 15:34:53 PDT 2016


Author: eugenis
Date: Thu Sep 15 17:34:53 2016
New Revision: 281674

URL: http://llvm.org/viewvc/llvm-project?rev=281674&view=rev
Log:
[asan] Reenable 64-bit allocator on android/aarch64.

This is a re-commit of r281371, with updated tests.

Modified:
    compiler-rt/trunk/lib/sanitizer_common/sanitizer_platform.h
    compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc

Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_platform.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_platform.h?rev=281674&r1=281673&r2=281674&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_platform.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_platform.h Thu Sep 15 17:34:53 2016
@@ -168,7 +168,9 @@
 // For such platforms build this code with -DSANITIZER_CAN_USE_ALLOCATOR64=0 or
 // change the definition of SANITIZER_CAN_USE_ALLOCATOR64 here.
 #ifndef SANITIZER_CAN_USE_ALLOCATOR64
-#if defined(__mips64) || defined(__aarch64__)
+# if SANITIZER_ANDROID && defined(__aarch64__)
+#  define SANITIZER_CAN_USE_ALLOCATOR64 1
+# elif defined(__mips64) || defined(__aarch64__)
 #  define SANITIZER_CAN_USE_ALLOCATOR64 0
 # else
 #  define SANITIZER_CAN_USE_ALLOCATOR64 (SANITIZER_WORDSIZE == 64)
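Not part of the diff, but for orientation: this macro is what the sanitizers
key their primary-allocator choice on. A minimal sketch of the downstream
effect, assuming the usual pattern in asan_allocator.h (the 32-bit template
argument list is elided here, since it differs per platform):

    #if SANITIZER_CAN_USE_ALLOCATOR64
    // 64-bit primary: one huge reserved region, carved into per-class regions.
    typedef SizeClassAllocator64<AP64> PrimaryAllocator;
    #else
    // 32-bit primary: no big reservation; chunks are tracked via a byte map.
    typedef SizeClassAllocator32<...> PrimaryAllocator;
    #endif

With this commit, Android/AArch64 takes the 64-bit branch again.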

Modified: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc?rev=281674&r1=281673&r2=281674&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_allocator_test.cc Thu Sep 15 17:34:53 2016
@@ -38,17 +38,24 @@ using namespace __sanitizer;
 static const uptr kAllocatorSpace = ~(uptr)0;
 static const uptr kAllocatorSize  =  0x8000000000ULL;  // 512G.
 static const u64 kAddressSpaceSize = 1ULL << 47;
+typedef DefaultSizeClassMap SizeClassMap;
+#elif SANITIZER_ANDROID && defined(__aarch64__)
+static const uptr kAllocatorSpace = 0x3000000000ULL;
+static const uptr kAllocatorSize  = 0x2000000000ULL;
+static const u64 kAddressSpaceSize = 1ULL << 39;
+typedef VeryCompactSizeClassMap SizeClassMap;
 #else
 static const uptr kAllocatorSpace = 0x700000000000ULL;
 static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
 static const u64 kAddressSpaceSize = 1ULL << 47;
+typedef DefaultSizeClassMap SizeClassMap;
 #endif
 
 struct AP64 {  // Allocator Params. Short name for shorter demangled names.
   static const uptr kSpaceBeg = kAllocatorSpace;
   static const uptr kSpaceSize = kAllocatorSize;
   static const uptr kMetadataSize = 16;
-  typedef DefaultSizeClassMap SizeClassMap;
+  typedef ::SizeClassMap SizeClassMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
 };
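A side note on the new Android constants (not part of the diff): they check
out arithmetically. 0x2000000000ULL is 2^37 = 128G, placed at 0x3000000000
(192G), so the space ends at 0x5000000000 (320G), comfortably inside the
39-bit (512G) virtual address space typical of Android/AArch64 kernels. The
tests instantiate the primary allocator from these params in the usual way;
a sketch matching the typedefs the tests below refer to:

    typedef SizeClassAllocator64<AP64> Allocator64;            // fixed base
    typedef SizeClassAllocator64<AP64Dyn> Allocator64Dynamic;  // base mapped at Init()

kSpaceBeg = ~(uptr)0, as in AP64Dyn, is the allocator's convention for
"choose the base address dynamically rather than at a fixed spot".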
@@ -57,7 +64,7 @@ struct AP64Dyn {
   static const uptr kSpaceBeg = ~(uptr)0;
   static const uptr kSpaceSize = kAllocatorSize;
   static const uptr kMetadataSize = 16;
-  typedef DefaultSizeClassMap SizeClassMap;
+  typedef ::SizeClassMap SizeClassMap;
   typedef NoOpMapUnmapCallback MapUnmapCallback;
   static const uptr kFlags = 0;
 };
@@ -212,9 +219,12 @@ TEST(SanitizerCommon, SizeClassAllocator
   TestSizeClassAllocator<Allocator64Dynamic>();
 }
 
+#if !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64Compact) {
   TestSizeClassAllocator<Allocator64Compact>();
 }
+#endif
+
 TEST(SanitizerCommon, SizeClassAllocator64VeryCompact) {
   TestSizeClassAllocator<Allocator64VeryCompact>();
 }
@@ -237,7 +247,7 @@ void SizeClassAllocatorMetadataStress()
   void *allocated[kNumAllocs];
   void *meta[kNumAllocs];
   for (uptr i = 0; i < kNumAllocs; i++) {
-    void *x = cache.Allocate(a, 1 + i % 50);
+    void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
     allocated[i] = x;
     meta[i] = a->GetMetaData(x);
   }
@@ -248,7 +258,7 @@ void SizeClassAllocatorMetadataStress()
     EXPECT_EQ(m, meta[idx]);
   }
   for (uptr i = 0; i < kNumAllocs; i++) {
-    cache.Deallocate(a, 1 + i % 50, allocated[i]);
+    cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
   }
 
   a->TestOnlyUnmap();
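For readers wondering about the two hunks above (not part of the diff): the
hard-coded 50 silently assumed at least 51 size classes, which holds for
DefaultSizeClassMap but not for the VeryCompactSizeClassMap now used on
Android, and an out-of-range class ID would index past the allocator's
per-class state. Deriving the ID from the allocator keeps the test valid for
any map. A sketch of the invariant being restored:

    // Valid class IDs run from 1 to kNumClasses - 1 (0 is effectively unused),
    // so this stays in range no matter which SizeClassMap is configured:
    uptr class_id = 1 + i % (Allocator::kNumClasses - 1);
    CHECK_GT(class_id, 0);
    CHECK_LT(class_id, Allocator::kNumClasses);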
@@ -267,10 +277,13 @@ TEST(SanitizerCommon, SizeClassAllocator
   SizeClassAllocatorMetadataStress<Allocator64Dynamic>();
 }
 
+#if !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
   SizeClassAllocatorMetadataStress<Allocator64Compact>();
 }
 #endif
+
+#endif
 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
 TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
   SizeClassAllocatorMetadataStress<Allocator32Compact>();
@@ -304,14 +317,18 @@ void SizeClassAllocatorGetBlockBeginStre
 // to run them all at the same time. FIXME: Make them not flaky and reenable.
 #if !SANITIZER_WINDOWS
 TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
-  SizeClassAllocatorGetBlockBeginStress<Allocator64>(1ULL << 33);
+  SizeClassAllocatorGetBlockBeginStress<Allocator64>(
+      1ULL << (SANITIZER_ANDROID ? 31 : 33));
 }
 TEST(SanitizerCommon, SizeClassAllocator64DynamicGetBlockBegin) {
-  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(1ULL << 33);
+  SizeClassAllocatorGetBlockBeginStress<Allocator64Dynamic>(
+      1ULL << (SANITIZER_ANDROID ? 31 : 33));
 }
+#if !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
   SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
 }
+#endif
 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
   // Does not have > 4Gb for each class.
   SizeClassAllocatorGetBlockBeginStress<Allocator64VeryCompact>(1ULL << 31);
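The smaller Android stress budget (not part of the diff, but worth spelling
out) falls out of the same constants: SizeClassAllocator64 splits kSpaceSize
evenly into one region per (rounded) size class, so a single class only gets
a few gigabytes of the 128G Android space, and pushing 8G through one class
would exhaust its region. Rough arithmetic, with the exact class count
hedged since it depends on the map:

    // per-class region = kSpaceSize / kNumClassesRounded
    //   0x2000000000 (128G) / 32 = 4G;  / 64 = 2G
    // => 1ULL << 33 (8G) cannot fit in a single class; 1ULL << 31 (2G) can.

The VeryCompact test above already used the 2G cap for the same reason, as
its in-line comment notes.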
@@ -336,12 +353,12 @@ int TestMapUnmapCallback::unmap_count;
 #if !SANITIZER_WINDOWS
 
 struct AP64WithCallback {
-    static const uptr kSpaceBeg = kAllocatorSpace;
-    static const uptr kSpaceSize = kAllocatorSize;
-    static const uptr kMetadataSize = 16;
-    typedef DefaultSizeClassMap SizeClassMap;
-    typedef TestMapUnmapCallback MapUnmapCallback;
-    static const uptr kFlags = 0;
+  static const uptr kSpaceBeg = kAllocatorSpace;
+  static const uptr kSpaceSize = kAllocatorSize;
+  static const uptr kMetadataSize = 16;
+  typedef ::SizeClassMap SizeClassMap;
+  typedef TestMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
 };
 
 TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
@@ -358,7 +375,7 @@ TEST(SanitizerCommon, SizeClassAllocator
   stats.Init();
   const size_t kNumChunks = 128;
   uint32_t chunks[kNumChunks];
-  a->GetFromAllocator(&stats, 32, chunks, kNumChunks);
+  a->GetFromAllocator(&stats, 30, chunks, kNumChunks);
   // State + alloc + metadata + freearray.
   EXPECT_EQ(TestMapUnmapCallback::map_count, 4);
   a->TestOnlyUnmap();
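For context (not part of the diff): the counters asserted here come from a
small fixture declared earlier in this test file; a sketch of its shape,
matching the OnMap/OnUnmap interface that NoOpMapUnmapCallback also
implements:

    struct TestMapUnmapCallback {
      static int map_count, unmap_count;
      void OnMap(uptr p, uptr size) const { map_count++; }
      void OnUnmap(uptr p, uptr size) const { unmap_count++; }
    };

The requested class drops from 32 to 30, presumably so the request stays
well-formed and the "State + alloc + metadata + freearray" expectation of
exactly four mappings still holds under VeryCompactSizeClassMap.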
@@ -430,7 +447,7 @@ void FailInAssertionOnOOM() {
 
 // Don't test OOM conditions on Win64 because it causes other tests on the same
 // machine to OOM.
-#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
   EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
 }
@@ -583,11 +600,13 @@ TEST(SanitizerCommon, CombinedAllocator6
       SizeClassAllocatorLocalCache<Allocator64Dynamic> > ();
 }
 
+#if !SANITIZER_ANDROID
 TEST(SanitizerCommon, CombinedAllocator64Compact) {
   TestCombinedAllocator<Allocator64Compact,
       LargeMmapAllocator<>,
       SizeClassAllocatorLocalCache<Allocator64Compact> > ();
 }
+#endif
 
 TEST(SanitizerCommon, CombinedAllocator64VeryCompact) {
   TestCombinedAllocator<Allocator64VeryCompact,
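These Combined tests exercise the full three-layer stack. A sketch (not part
of the diff) of how the pieces compose, assuming the CombinedAllocator
template parameter order of this era (primary, per-thread cache, secondary):

    typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
    typedef CombinedAllocator<Allocator64, AllocatorCache,
                              LargeMmapAllocator<> > Allocator;
    // Small requests go to the size-class primary through the cache;
    // anything larger falls through to the mmap-based secondary.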
@@ -650,10 +669,12 @@ TEST(SanitizerCommon, SizeClassAllocator
       SizeClassAllocatorLocalCache<Allocator64Dynamic> >();
 }
 
+#if !SANITIZER_ANDROID
 TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
   TestSizeClassAllocatorLocalCache<
       SizeClassAllocatorLocalCache<Allocator64Compact> >();
 }
+#endif
 TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
   TestSizeClassAllocatorLocalCache<
       SizeClassAllocatorLocalCache<Allocator64VeryCompact> >();
@@ -908,7 +929,7 @@ TEST(SanitizerCommon, LargeMmapAllocator
 
 // Don't test OOM conditions on Win64 because it causes other tests on the same
 // machine to OOM.
-#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64
+#if SANITIZER_CAN_USE_ALLOCATOR64 && !SANITIZER_WINDOWS64 && !SANITIZER_ANDROID
 typedef SizeClassMap<3, 4, 8, 63, 128, 16> SpecialSizeClassMap;
 struct AP64_SpecialSizeClassMap {
   static const uptr kSpaceBeg = kAllocatorSpace;

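One last note (not part of the diff): SpecialSizeClassMap instantiates the
SizeClassMap template directly; a reading of the arguments, assuming the
parameter order in sanitizer_allocator_size_class_map.h at this revision:

    // SizeClassMap<kNumBits, kMinSizeLog, kMidSizeLog, kMaxSizeLog,
    //              kMaxNumCachedHint, kMaxBytesCachedLog>
    typedef SizeClassMap<3, 4, 8, 63, 128, 16> SpecialSizeClassMap;
    // kMaxSizeLog = 63 makes the top size class enormous, which is what lets
    // the OOM test that uses this map exhaust the allocator deterministically.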


