[PATCH] D60243: [LSan][AArch64] Speed-up leak and address sanitizers on AArch64 for 48-bit VMA
Vitaly Buka via Phabricator via llvm-commits
llvm-commits at lists.llvm.org
Tue Aug 20 22:14:40 PDT 2019
vitalybuka requested changes to this revision.
vitalybuka added a comment.
This revision now requires changes to proceed.
I've started to fix this, but realized that it's more than a quick fix:
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
index c11d1f83fb54..47b4aba488bd 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_combined.h
@@ -159,7 +159,7 @@ class CombinedAllocator {
void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }
void InitCache(AllocatorCache *cache) {
- cache->Init(&stats_);
+ cache->Init(&primary_, &stats_);
}
void DestroyCache(AllocatorCache *cache) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h
index 108dfc231a22..d63ef6aa443f 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -18,7 +18,7 @@ template <class SizeClassAllocator>
struct SizeClassAllocator64LocalCache {
typedef SizeClassAllocator Allocator;
- void Init(AllocatorGlobalStats *s) {
+ void Init(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
stats_.Init();
if (s)
s->Register(&stats_);
@@ -122,7 +122,7 @@ struct SizeClassAllocator32LocalCache {
typedef SizeClassAllocator Allocator;
typedef typename Allocator::TransferBatch TransferBatch;
- void Init(AllocatorGlobalStats *s) {
+ void Init(SizeClassAllocator *allocator, AllocatorGlobalStats *s) {
stats_.Init();
if (s)
s->Register(&stats_);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_runtime_select_allocator.h b/compiler-rt/lib/sanitizer_common/sanitizer_runtime_select_allocator.h
index 3b9e35445981..d8538f88428e 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_runtime_select_allocator.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_runtime_select_allocator.h
@@ -26,11 +26,11 @@ class RuntimeSelectAllocator {
typename Allocator2::AllocatorCache a2;
public:
- void Init(AllocatorGlobalStats *s) {
- if (this->use_first_allocator)
- a1.Init(s);
+ void Init(RuntimeSelectAllocator *allocator, AllocatorGlobalStats *s) {
+ if (allocator->use_first_allocator)
+ a1.Init(&allocator->a1, s);
else
- a2.Init(s);
+ a2.Init(&allocator->a2, s);
}
void *Allocate(RuntimeSelectAllocator *allocator, uptr class_id) {
if (allocator->use_first_allocator)
@@ -86,6 +86,18 @@ class RuntimeSelectAllocator {
return Allocator2::ClassID(size);
}
+ uptr LargestClassID() {
+ if (use_first_allocator)
+ return Allocator1::SizeClassMapT::kLargestClassID;
+ return Allocator2::SizeClassMapT::kLargestClassID;
+ }
+
+ uptr ClassSize(uptr id) {
+ if (use_first_allocator)
+ return Allocator1::SizeClassMapT::Size(id);
+ return Allocator2::SizeClassMapT::Size(id);
+ }
+
uptr KNumClasses() {
if (use_first_allocator)
return Allocator1::KNumClasses();
@@ -110,6 +122,12 @@ class RuntimeSelectAllocator {
return a2.GetMetaData(p);
}
+ uptr TotalMemoryUsed() {
+ if (use_first_allocator)
+ return a1.TotalMemoryUsed();
+ return a2.TotalMemoryUsed();
+ }
+
uptr GetSizeClass(const void *p) {
if (use_first_allocator)
return a1.GetSizeClass(p);
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
index dc26a0a445f0..e70449e07802 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_allocator_test.cpp
@@ -160,8 +160,19 @@ using Allocator32CompactASVT =
SizeClassAllocator32<AP32Compact<AddressSpaceView>>;
using Allocator32Compact = Allocator32CompactASVT<LocalAddressSpaceView>;
+#if SANITIZER_CAN_USE_ALLOCATOR64
using Allocator32or64Compact =
RuntimeSelectAllocator<Allocator32Compact, Allocator64Compact>;
+class Allocator32or64CompactUse1 : public Allocator32or64Compact {
+ public:
+ Allocator32or64CompactUse1() { use_first_allocator = true; }
+};
+
+class Allocator32or64CompactUse2 : public Allocator32or64Compact {
+ public:
+ Allocator32or64CompactUse2() { use_first_allocator = false; }
+};
+#endif
template <class SizeClassMap>
void TestSizeClassMap() {
@@ -196,7 +207,7 @@ void TestSizeClassAllocator() {
a->Init(kReleaseToOSIntervalNever);
typename Allocator::AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
- cache.Init(0);
+ cache.Init(a, 0);
static const uptr sizes[] = {
1, 16, 30, 40, 100, 1000, 10000,
@@ -215,7 +226,7 @@ void TestSizeClassAllocator() {
uptr n_iter = std::max((uptr)6, 4000000 / size);
// fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
for (uptr i = 0; i < n_iter; i++) {
- uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
+ uptr class_id0 = a->ClassID(size);
char *x = (char*)cache.Allocate(a, class_id0);
x[0] = 0;
x[size - 1] = 0;
@@ -228,7 +239,7 @@ void TestSizeClassAllocator() {
CHECK(a->PointerIsMine(x + size / 2));
CHECK_GE(a->GetActuallyAllocatedSize(x), size);
uptr class_id = a->GetSizeClass(x);
- CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
+ CHECK_EQ(class_id, a->ClassID(size));
uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
metadata[0] = reinterpret_cast<uptr>(x) + 1;
metadata[1] = 0xABCD;
@@ -278,10 +289,8 @@ TEST(SanitizerCommon, SizeClassAllocator64Compact) {
}
TEST(SanitizerCommon, SizeClassAllocator32or64Compact) {
- Allocator32or64Compact::UseAllocator1 = false;
- TestSizeClassAllocator<Allocator32or64Compact>();
- Allocator32or64Compact::UseAllocator1 = true;
- TestSizeClassAllocator<Allocator32or64Compact>();
+ TestSizeClassAllocator<Allocator32or64CompactUse1>();
+ TestSizeClassAllocator<Allocator32or64CompactUse2>();
}
TEST(SanitizerCommon, SizeClassAllocator64Dense) {
@@ -327,13 +336,13 @@ void SizeClassAllocatorMetadataStress() {
a->Init(kReleaseToOSIntervalNever);
typename Allocator::AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
- cache.Init(0);
+ cache.Init(a, 0);
const uptr kNumAllocs = 1 << 13;
void *allocated[kNumAllocs];
void *meta[kNumAllocs];
for (uptr i = 0; i < kNumAllocs; i++) {
- void *x = cache.Allocate(a, 1 + i % (Allocator::kNumClasses - 1));
+ void *x = cache.Allocate(a, 1 + i % (a->KNumClasses() - 1));
allocated[i] = x;
meta[i] = a->GetMetaData(x);
}
@@ -344,7 +353,7 @@ void SizeClassAllocatorMetadataStress() {
EXPECT_EQ(m, meta[idx]);
}
for (uptr i = 0; i < kNumAllocs; i++) {
- cache.Deallocate(a, 1 + i % (Allocator::kNumClasses - 1), allocated[i]);
+ cache.Deallocate(a, 1 + i % (a->KNumClasses() - 1), allocated[i]);
}
a->TestOnlyUnmap();
@@ -368,10 +377,8 @@ TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32or64CompactMetadataStress) {
- Allocator32or64Compact::UseAllocator1 = false;
- SizeClassAllocatorMetadataStress<Allocator32or64Compact>();
- Allocator32or64Compact::UseAllocator1 = true;
- SizeClassAllocatorMetadataStress<Allocator32or64Compact>();
+ SizeClassAllocatorMetadataStress<Allocator32or64CompactUse1>();
+ SizeClassAllocatorMetadataStress<Allocator32or64CompactUse2>();
}
#endif
@@ -387,10 +394,10 @@ void SizeClassAllocatorGetBlockBeginStress(u64 TotalSize) {
a->Init(kReleaseToOSIntervalNever);
typename Allocator::AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
- cache.Init(0);
+ cache.Init(a, 0);
- uptr max_size_class = Allocator::SizeClassMapT::kLargestClassID;
- uptr size = Allocator::SizeClassMapT::Size(max_size_class);
+ uptr max_size_class = a->LargestClassID();
+ uptr size = a->ClassSize(max_size_class);
// Make sure we correctly compute GetBlockBegin() w/o overflow.
for (size_t i = 0; i <= TotalSize / size; i++) {
void *x = cache.Allocate(a, max_size_class);
@@ -421,10 +428,8 @@ TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>(1ULL << 33);
}
TEST(SanitizerCommon, SizeClassAllocator32or64CompactGetBlockBegin) {
- Allocator32or64Compact::UseAllocator1 = false;
- SizeClassAllocatorGetBlockBeginStress<Allocator32or64Compact>(1ULL << 33);
- Allocator32or64Compact::UseAllocator1 = true;
- SizeClassAllocatorGetBlockBeginStress<Allocator32or64Compact>(1ULL << 33);
+ SizeClassAllocatorGetBlockBeginStress<Allocator32or64CompactUse1>(1ULL << 33);
+ SizeClassAllocatorGetBlockBeginStress<Allocator32or64CompactUse2>(1ULL << 33);
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactGetBlockBegin) {
@@ -470,7 +475,7 @@ TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
EXPECT_EQ(TestMapUnmapCallback::map_count, 1); // Allocator state.
typename Allocator64WithCallBack::AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
- cache.Init(0);
+ cache.Init(a, 0);
AllocatorStats stats;
stats.Init();
const size_t kNumChunks = 128;
@@ -506,7 +511,7 @@ TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
Allocator32WithCallBack::AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
- cache.Init(0);
+ cache.Init(a, 0);
AllocatorStats stats;
stats.Init();
a->AllocateBatch(&stats, &cache, 32);
@@ -540,7 +545,7 @@ TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
a.Init(kReleaseToOSIntervalNever);
Allocator64::AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
- cache.Init(0);
+ cache.Init(&a, 0);
AllocatorStats stats;
stats.Init();
@@ -717,10 +722,8 @@ TEST(SanitizerCommon, CombinedAllocator64Compact) {
TestCombinedAllocator<Allocator64Compact>();
}
TEST(SanitizerCommon, CombinedRuntimeSelectAllocator) {
- Allocator32or64Compact::UseAllocator1 = false;
- TestCombinedAllocator<Allocator32or64Compact>();
- Allocator32or64Compact::UseAllocator1 = true;
- TestCombinedAllocator<Allocator32or64Compact>();
+ TestCombinedAllocator<Allocator32or64CompactUse1>();
+ TestCombinedAllocator<Allocator32or64CompactUse2>();
}
#endif
@@ -741,7 +744,7 @@ void TestSizeClassAllocatorLocalCache() {
a->Init(kReleaseToOSIntervalNever);
memset(&cache, 0, sizeof(cache));
- cache.Init(0);
+ cache.Init(a, 0);
const uptr kNumAllocs = 10000;
const int kNumIter = 100;
@@ -784,10 +787,8 @@ TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
TestSizeClassAllocatorLocalCache<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32or64CompactLocalCache) {
- Allocator32or64Compact::UseAllocator1 = false;
- TestSizeClassAllocatorLocalCache<Allocator32or64Compact>();
- Allocator32or64Compact::UseAllocator1 = true;
- TestSizeClassAllocatorLocalCache<Allocator32or64Compact>();
+ TestSizeClassAllocatorLocalCache<Allocator32or64CompactUse1>();
+ TestSizeClassAllocatorLocalCache<Allocator32or64CompactUse2>();
}
#endif
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactLocalCache) {
@@ -922,7 +923,7 @@ void TestSizeClassAllocatorIteration() {
a->Init(kReleaseToOSIntervalNever);
typename Allocator::AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
- cache.Init(0);
+ cache.Init(a, 0);
static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};
@@ -1065,7 +1066,7 @@ TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
a->Init(kReleaseToOSIntervalNever);
SpecialAllocator64::AllocatorCache cache;
memset(&cache, 0, sizeof(cache));
- cache.Init(0);
+ cache.Init(a, 0);
// ...one man is on a mission to overflow a region with a series of
// successive allocations.
@@ -1368,10 +1369,8 @@ TEST(SanitizerCommon, SizeClassAllocator64CompactReleaseFreeMemoryToOS) {
TestReleaseFreeMemoryToOS<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32or64CompactReleaseFreeMemoryToOS) {
- Allocator32or64Compact::UseAllocator1 = false;
- TestReleaseFreeMemoryToOS<Allocator32or64Compact>();
- Allocator32or64Compact::UseAllocator1 = true;
- TestReleaseFreeMemoryToOS<Allocator32or64Compact>();
+ TestReleaseFreeMemoryToOS<Allocator32or64CompactUse1>();
+ TestReleaseFreeMemoryToOS<Allocator32or64CompactUse2>();
}
TEST(SanitizerCommon, SizeClassAllocator64VeryCompactReleaseFreeMemoryToOS) {
diff --git a/compiler-rt/lib/scudo/scudo_allocator_combined.h b/compiler-rt/lib/scudo/scudo_allocator_combined.h
index d61cc9ec1a52..ec36ae3b318f 100644
--- a/compiler-rt/lib/scudo/scudo_allocator_combined.h
+++ b/compiler-rt/lib/scudo/scudo_allocator_combined.h
@@ -50,7 +50,7 @@ class CombinedAllocator {
}
void initCache(AllocatorCache *Cache) {
- Cache->Init(&Stats);
+ Cache->Init(&Primary, &Stats);
}
void destroyCache(AllocatorCache *Cache) {
================
Comment at: compiler-rt/lib/lsan/lsan_allocator.h:47
uptr requested_size : 32;
uptr padding : 22;
#endif
----------------
This patch is larger than expected.
Could you please move the asan/lsan-specific changes into separate patches?
================
Comment at: compiler-rt/lib/sanitizer_common/sanitizer_runtime_select_allocator.h:67
+
+ void Init(s32 release_to_os_interval_ms) {
+ // Use the first allocator when the address
----------------
We need to remove GetMaxVirtualAddress from this template; RuntimeSelectAllocator should have no address-space-specific logic.
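For example, the caller could make the decision and pass it in, keeping the template generic (an untested sketch; the member names follow this file, and the call-site condition shown is only illustrative):

  // Sketch: RuntimeSelectAllocator records the choice it is given and
  // forwards Init to the selected underlying allocator. No address-space
  // checks happen inside the template.
  void Init(bool use_first, s32 release_to_os_interval_ms) {
    use_first_allocator = use_first;
    if (use_first_allocator)
      a1.Init(release_to_os_interval_ms);
    else
      a2.Init(release_to_os_interval_ms);
  }

The tool-specific code (e.g. lsan) would then do the address-space check itself at the call site, along the lines of:

  // Illustrative only: pick the 32-bit allocator on small address spaces.
  allocator.Init(GetMaxVirtualAddress() < (1ULL << 32),
                 release_to_os_interval_ms);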
Repository:
rL LLVM
CHANGES SINCE LAST ACTION
https://reviews.llvm.org/D60243/new/
https://reviews.llvm.org/D60243