[compiler-rt] r182994 - [lsan] Use the fast version of GetBlockBegin for leak checking in LSan and ASan.
Sergey Matveev
earthdok at google.com
Fri May 31 04:13:46 PDT 2013
Author: smatveev
Date: Fri May 31 06:13:45 2013
New Revision: 182994
URL: http://llvm.org/viewvc/llvm-project?rev=182994&view=rev
Log:
[lsan] Use the fast version of GetBlockBegin for leak checking in LSan and ASan.
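Context for the change: during a leak check LSan scans memory one word at a time and calls PointsIntoChunk() on every value it loads, so the block-begin lookup sits on the hottest path of the whole pass. The scan runs with all threads stopped and the allocator lock held (see the lsan_common.cc hunk below), which is what makes the lock-assuming fast lookup legal. A minimal sketch of that scan shape, loosely modeled on LSan's ScanRangeForPointers (which this patch does not touch); types and the marking step are simplified stand-ins:

  // Hedged sketch only; "uptr" stands in for sanitizer_common's
  // unsigned pointer-sized integer, and PointsIntoChunk is the
  // function patched below.
  typedef unsigned long uptr;
  void *PointsIntoChunk(void *p);  // resolves p to a heap chunk, or 0

  void ScanRangeForPointersSketch(uptr begin, uptr end) {
    for (uptr pp = begin; pp + sizeof(void *) <= end; pp += sizeof(uptr)) {
      void *p = *reinterpret_cast<void **>(pp);
      if (void *chunk = PointsIntoChunk(p)) {  // fast path after this patch
        // ... mark the chunk starting at `chunk` as reachable ...
        (void)chunk;
      }
    }
  }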
Modified:
compiler-rt/trunk/lib/asan/asan_allocator2.cc
compiler-rt/trunk/lib/lsan/lsan_allocator.cc
compiler-rt/trunk/lib/lsan/lsan_common.cc
compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
Modified: compiler-rt/trunk/lib/asan/asan_allocator2.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/asan/asan_allocator2.cc?rev=182994&r1=182993&r2=182994&view=diff
==============================================================================
--- compiler-rt/trunk/lib/asan/asan_allocator2.cc (original)
+++ compiler-rt/trunk/lib/asan/asan_allocator2.cc Fri May 31 06:13:45 2013
@@ -528,9 +528,8 @@ static void *Reallocate(void *old_ptr, u
return new_ptr;
}
-static AsanChunk *GetAsanChunkByAddr(uptr p) {
- void *ptr = reinterpret_cast<void *>(p);
- uptr alloc_beg = reinterpret_cast<uptr>(allocator.GetBlockBegin(ptr));
+// Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
+static AsanChunk *GetAsanChunk(void *alloc_beg) {
if (!alloc_beg) return 0;
uptr *memalign_magic = reinterpret_cast<uptr *>(alloc_beg);
if (memalign_magic[0] == kMemalignMagic) {
@@ -538,13 +537,13 @@ static AsanChunk *GetAsanChunkByAddr(upt
CHECK(m->from_memalign);
return m;
}
- if (!allocator.FromPrimary(ptr)) {
- uptr *meta = reinterpret_cast<uptr *>(
- allocator.GetMetaData(reinterpret_cast<void *>(alloc_beg)));
+ if (!allocator.FromPrimary(alloc_beg)) {
+ uptr *meta = reinterpret_cast<uptr *>(allocator.GetMetaData(alloc_beg));
AsanChunk *m = reinterpret_cast<AsanChunk *>(meta[1]);
return m;
}
- uptr actual_size = allocator.GetActuallyAllocatedSize(ptr);
+ uptr actual_size =
+ allocator.GetActuallyAllocatedSize(alloc_beg);
CHECK_LE(actual_size, SizeClassMap::kMaxSize);
// We know the actually allocated size, but we don't know the redzone size.
// Just try all possible redzone sizes.
@@ -554,11 +553,23 @@ static AsanChunk *GetAsanChunkByAddr(upt
if (ComputeRZLog(max_possible_size) != rz_log)
continue;
return reinterpret_cast<AsanChunk *>(
- alloc_beg + rz_size - kChunkHeaderSize);
+ reinterpret_cast<uptr>(alloc_beg) + rz_size - kChunkHeaderSize);
}
return 0;
}
+static AsanChunk *GetAsanChunkByAddr(uptr p) {
+ void *alloc_beg = allocator.GetBlockBegin(reinterpret_cast<void *>(p));
+ return GetAsanChunk(alloc_beg);
+}
+
+// Allocator must be locked when this function is called.
+static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p) {
+ void *alloc_beg =
+ allocator.GetBlockBeginFastLocked(reinterpret_cast<void *>(p));
+ return GetAsanChunk(alloc_beg);
+}
+
static uptr AllocationSize(uptr p) {
AsanChunk *m = GetAsanChunkByAddr(p);
if (!m) return 0;
@@ -721,7 +732,7 @@ void GetAllocatorGlobalRange(uptr *begin
void *PointsIntoChunk(void* p) {
uptr addr = reinterpret_cast<uptr>(p);
- __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(addr);
+ __asan::AsanChunk *m = __asan::GetAsanChunkByAddrFastLocked(addr);
if (!m) return 0;
uptr chunk = m->Beg();
if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr))
@@ -730,7 +741,8 @@ void *PointsIntoChunk(void* p) {
}
void *GetUserBegin(void *p) {
- __asan::AsanChunk *m = __asan::GetAsanChunkByAddr(reinterpret_cast<uptr>(p));
+ __asan::AsanChunk *m =
+ __asan::GetAsanChunkByAddrFastLocked(reinterpret_cast<uptr>(p));
CHECK(m);
return reinterpret_cast<void *>(m->Beg());
}
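In summary, the ASan change splits the lookup into a pure helper plus two wrappers that differ only in how alloc_beg is obtained; the condensed shape, taken directly from the hunks above, is:

  // Assumes alloc_beg == allocator.GetBlockBegin(alloc_beg).
  static AsanChunk *GetAsanChunk(void *alloc_beg);         // shared logic
  static AsanChunk *GetAsanChunkByAddr(uptr p);            // general-purpose lookup
  static AsanChunk *GetAsanChunkByAddrFastLocked(uptr p);  // allocator must be locked

PointsIntoChunk() and GetUserBegin() switch to the FastLocked wrapper because LSan only calls them from the stop-the-world callback, where the allocator lock is already held.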
Modified: compiler-rt/trunk/lib/lsan/lsan_allocator.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/lsan/lsan_allocator.cc?rev=182994&r1=182993&r2=182994&view=diff
==============================================================================
--- compiler-rt/trunk/lib/lsan/lsan_allocator.cc (original)
+++ compiler-rt/trunk/lib/lsan/lsan_allocator.cc Fri May 31 06:13:45 2013
@@ -133,8 +133,7 @@ void GetAllocatorGlobalRange(uptr *begin
}
void *PointsIntoChunk(void* p) {
- if (!allocator.PointerIsMine(p)) return 0;
- void *chunk = allocator.GetBlockBegin(p);
+ void *chunk = allocator.GetBlockBeginFastLocked(p);
if (!chunk) return 0;
// LargeMmapAllocator considers pointers to the meta-region of a chunk to be
// valid, but we don't want that.
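The PointerIsMine() guard can be dropped here because the combined fast path (added in sanitizer_allocator.h below) performs the same dispatch itself and, for pointers owned by neither allocator, falls through to the secondary lookup, which returns 0 for addresses it does not know about:

  void *GetBlockBeginFastLocked(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);  // 0 if p is not a heap pointer
  }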
Modified: compiler-rt/trunk/lib/lsan/lsan_common.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/lsan/lsan_common.cc?rev=182994&r1=182993&r2=182994&view=diff
==============================================================================
--- compiler-rt/trunk/lib/lsan/lsan_common.cc (original)
+++ compiler-rt/trunk/lib/lsan/lsan_common.cc Fri May 31 06:13:45 2013
@@ -236,7 +236,7 @@ static void LockAndSuspendThreads(StopTh
LockThreadRegistry();
LockAllocator();
StopTheWorld(callback, arg);
- // Allocator must be unlocked by the callback.
+ UnlockAllocator();
UnlockThreadRegistry();
}
@@ -293,8 +293,6 @@ static void DoLeakCheckCallback(const Su
void *arg) {
LeakCheckResult *result = reinterpret_cast<LeakCheckResult *>(arg);
CHECK_EQ(*result, kFatalError);
- // Allocator must not be locked when we call GetRegionBegin().
- UnlockAllocator();
ClassifyAllChunks(suspended_threads);
LeakReport leak_report;
CollectLeaks(&leak_report);
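The net effect on locking is that the allocator lock is now held across the entire stop-the-world window and released symmetrically by LockAndSuspendThreads(), instead of asymmetrically by each callback. The resulting discipline, as it reads after this patch:

  LockThreadRegistry();
  LockAllocator();               // *FastLocked lookups are valid from here on
  StopTheWorld(callback, arg);   // callback scans memory; allocator stays locked
  UnlockAllocator();
  UnlockThreadRegistry();

This is precisely what makes GetBlockBeginFastLocked safe to use during the scan: no allocation or deallocation can run concurrently with it.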
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h?rev=182994&r1=182993&r2=182994&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h Fri May 31 06:13:45 2013
@@ -1043,10 +1043,9 @@ class LargeMmapAllocator {
return GetUser(h);
}
- // This function does the same as GetBlockBegin, but much faster.
- // It may be called only in a single-threaded context, e.g. when all other
- // threads are suspended or joined.
- void *GetBlockBeginFastSingleThreaded(void *ptr) {
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *ptr) {
uptr p = reinterpret_cast<uptr>(ptr);
uptr n = n_chunks_;
if (!n) return 0;
@@ -1238,6 +1237,14 @@ class CombinedAllocator {
return secondary_.GetBlockBegin(p);
}
+ // This function does the same as GetBlockBegin, but is much faster.
+ // Must be called with the allocator locked.
+ void *GetBlockBeginFastLocked(void *p) {
+ if (primary_.PointerIsMine(p))
+ return primary_.GetBlockBegin(p);
+ return secondary_.GetBlockBeginFastLocked(p);
+ }
+
uptr GetActuallyAllocatedSize(void *p) {
if (primary_.PointerIsMine(p))
return primary_.GetActuallyAllocatedSize(p);
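A hedged usage sketch of the new combined API under its required locking discipline. ForceLock()/ForceUnlock() are the CombinedAllocator entry points that LSan's LockAllocator()/UnlockAllocator() wrap; treat that pairing as an assumption, since it is not shown in this diff:

  allocator.ForceLock();
  void *beg = allocator.GetBlockBeginFastLocked(p);
  // ... inspect the chunk starting at beg; do not allocate here ...
  allocator.ForceUnlock();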