[compiler-rt] r209744 - tsan: do not use 64-bit atomics in allocator code
Dmitry Vyukov
dvyukov at google.com
Wed May 28 08:22:12 PDT 2014
Author: dvyukov
Date: Wed May 28 10:22:12 2014
New Revision: 209744
URL: http://llvm.org/viewvc/llvm-project?rev=209744&view=rev
Log:
tsan: do not use 64-bit atomics in allocator code
64-bit atomics make porting of asan to 32-bit platforms problematic.
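
The patch collapses each pair of monotonic 64-bit counters (Malloced/Freed, Mmapped/Unmapped) into a single pointer-sized counter (Allocated, Mapped) that is incremented on allocation and decremented on deallocation, so only word-sized relaxed atomics are needed. Below is a minimal standalone sketch of the scheme; it uses std::atomic in place of sanitizer_common's internal atomic wrappers, which is an assumption made only to keep the example self-contained:

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Per-thread stat counters, pointer-sized so 32-bit targets need no
    // 64-bit atomics. Each counter is written only by its owning thread;
    // other threads merely read it during aggregation, so a plain relaxed
    // load+store (no atomic RMW) is enough for Add/Sub.
    enum AllocatorStat {
      AllocatorStatAllocated,  // bytes currently malloced and not yet freed
      AllocatorStatMapped,     // bytes currently mmapped and not yet unmapped
      AllocatorStatCount
    };

    struct AllocatorStats {
      void Add(AllocatorStat i, std::uintptr_t v) {
        v += stats_[i].load(std::memory_order_relaxed);
        stats_[i].store(v, std::memory_order_relaxed);
      }
      void Sub(AllocatorStat i, std::uintptr_t v) {
        v = stats_[i].load(std::memory_order_relaxed) - v;
        stats_[i].store(v, std::memory_order_relaxed);
      }
      std::uintptr_t Get(AllocatorStat i) const {
        return stats_[i].load(std::memory_order_relaxed);
      }
      std::atomic<std::uintptr_t> stats_[AllocatorStatCount] = {0, 0};
    };

    int main() {
      AllocatorStats s;
      s.Add(AllocatorStatAllocated, 128);  // e.g. malloc(128)
      s.Sub(AllocatorStatAllocated, 128);  // the matching free()
      std::printf("allocated: %zu\n", (size_t)s.Get(AllocatorStatAllocated));
    }
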
Modified:
compiler-rt/trunk/lib/msan/msan_allocator.cc
compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
Modified: compiler-rt/trunk/lib/msan/msan_allocator.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/msan/msan_allocator.cc?rev=209744&r1=209743&r2=209744&view=diff
==============================================================================
--- compiler-rt/trunk/lib/msan/msan_allocator.cc (original)
+++ compiler-rt/trunk/lib/msan/msan_allocator.cc Wed May 28 10:22:12 2014
@@ -185,19 +185,15 @@ static uptr AllocationSize(const void *p
using namespace __msan;
uptr __msan_get_current_allocated_bytes() {
- u64 stats[AllocatorStatCount];
+ uptr stats[AllocatorStatCount];
allocator.GetStats(stats);
- u64 m = stats[AllocatorStatMalloced];
- u64 f = stats[AllocatorStatFreed];
- return m >= f ? m - f : 1;
+ return stats[AllocatorStatAllocated];
}
uptr __msan_get_heap_size() {
- u64 stats[AllocatorStatCount];
+ uptr stats[AllocatorStatCount];
allocator.GetStats(stats);
- u64 m = stats[AllocatorStatMmapped];
- u64 f = stats[AllocatorStatUnmapped];
- return m >= f ? m - f : 1;
+ return stats[AllocatorStatMapped];
}
uptr __msan_get_free_bytes() {
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h?rev=209744&r1=209743&r2=209744&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator.h Wed May 28 10:22:12 2014
@@ -198,14 +198,12 @@ template<class SizeClassAllocator> struc
// Memory allocator statistics
enum AllocatorStat {
- AllocatorStatMalloced,
- AllocatorStatFreed,
- AllocatorStatMmapped,
- AllocatorStatUnmapped,
+ AllocatorStatAllocated,
+ AllocatorStatMapped,
AllocatorStatCount
};
-typedef u64 AllocatorStatCounters[AllocatorStatCount];
+typedef uptr AllocatorStatCounters[AllocatorStatCount];
// Per-thread stats, live in per-thread cache.
class AllocatorStats {
@@ -214,16 +212,21 @@ class AllocatorStats {
internal_memset(this, 0, sizeof(*this));
}
- void Add(AllocatorStat i, u64 v) {
+ void Add(AllocatorStat i, uptr v) {
v += atomic_load(&stats_[i], memory_order_relaxed);
atomic_store(&stats_[i], v, memory_order_relaxed);
}
- void Set(AllocatorStat i, u64 v) {
+ void Sub(AllocatorStat i, uptr v) {
+ v = atomic_load(&stats_[i], memory_order_relaxed) - v;
atomic_store(&stats_[i], v, memory_order_relaxed);
}
- u64 Get(AllocatorStat i) const {
+ void Set(AllocatorStat i, uptr v) {
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ uptr Get(AllocatorStat i) const {
return atomic_load(&stats_[i], memory_order_relaxed);
}
@@ -231,7 +234,7 @@ class AllocatorStats {
friend class AllocatorGlobalStats;
AllocatorStats *next_;
AllocatorStats *prev_;
- atomic_uint64_t stats_[AllocatorStatCount];
+ atomic_uintptr_t stats_[AllocatorStatCount];
};
// Global stats, used for aggregation and querying.
@@ -260,7 +263,7 @@ class AllocatorGlobalStats : public Allo
}
void Get(AllocatorStatCounters s) const {
- internal_memset(s, 0, AllocatorStatCount * sizeof(u64));
+ internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
SpinMutexLock l(&mu_);
const AllocatorStats *stats = this;
for (;;) {
@@ -270,6 +273,9 @@ class AllocatorGlobalStats : public Allo
if (stats == this)
break;
}
+ // All stats must be positive.
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] = ((sptr)s[i]) > 0 ? s[i] : 1;
}
private:
@@ -522,7 +528,7 @@ class SizeClassAllocator64 {
map_size += kUserMapSize;
CHECK_GE(region->mapped_user + map_size, end_idx);
MapWithCallback(region_beg + region->mapped_user, map_size);
- stat->Add(AllocatorStatMmapped, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
region->mapped_user += map_size;
}
uptr total_count = (region->mapped_user - beg_idx - size)
@@ -841,7 +847,7 @@ class SizeClassAllocator32 {
uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
"SizeClassAllocator32"));
MapUnmapCallback().OnMap(res, kRegionSize);
- stat->Add(AllocatorStatMmapped, kRegionSize);
+ stat->Add(AllocatorStatMapped, kRegionSize);
CHECK_EQ(0U, (res & (kRegionSize - 1)));
possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
return res;
@@ -907,7 +913,7 @@ struct SizeClassAllocatorLocalCache {
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id));
+ stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0))
Refill(allocator, class_id);
@@ -922,7 +928,7 @@ struct SizeClassAllocatorLocalCache {
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
- stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
+ stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
PerClass *c = &per_class_[class_id];
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
@@ -1033,8 +1039,8 @@ class LargeMmapAllocator {
stats.currently_allocated += map_size;
stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
stats.by_size_log[size_log]++;
- stat->Add(AllocatorStatMalloced, map_size);
- stat->Add(AllocatorStatMmapped, map_size);
+ stat->Add(AllocatorStatAllocated, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
}
return reinterpret_cast<void*>(res);
}
@@ -1052,8 +1058,8 @@ class LargeMmapAllocator {
chunks_sorted_ = false;
stats.n_frees++;
stats.currently_allocated -= h->map_size;
- stat->Add(AllocatorStatFreed, h->map_size);
- stat->Add(AllocatorStatUnmapped, h->map_size);
+ stat->Sub(AllocatorStatAllocated, h->map_size);
+ stat->Sub(AllocatorStatMapped, h->map_size);
}
MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
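
Because the per-thread counters are mutated with unsynchronized relaxed operations, a global reader can observe a Sub() before the matching Add() on another thread, briefly driving the uptr sum below zero (i.e. wrapping to a huge unsigned value). That is what the new clamping loop in AllocatorGlobalStats::Get() guards against, preserving the old "m >= f ? m - f : 1" contract for callers. A hypothetical standalone sketch of that aggregation step (the flat counters array is an assumption of the example, not the real data layout):

    #include <atomic>
    #include <cstdint>

    using uptr = std::uintptr_t;
    using sptr = std::intptr_t;

    enum AllocatorStat { AllocatorStatAllocated, AllocatorStatMapped,
                         AllocatorStatCount };

    // Sum one stat over per-thread counters with relaxed loads, then
    // reinterpret as signed and clamp: a transiently negative total shows
    // up as a huge unsigned value, which callers must never see.
    uptr SumAndClamp(const std::atomic<uptr> *counters, int n_threads,
                     AllocatorStat i) {
      uptr s = 0;
      for (int t = 0; t < n_threads; t++)
        s += counters[t * AllocatorStatCount + i].load(std::memory_order_relaxed);
      return (sptr)s > 0 ? s : 1;  // all reported stats must be positive
    }
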
Modified: compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc?rev=209744&r1=209743&r2=209744&view=diff
==============================================================================
--- compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc (original)
+++ compiler-rt/trunk/lib/tsan/rtl/tsan_mman.cc Wed May 28 10:22:12 2014
@@ -217,19 +217,15 @@ using namespace __tsan;
extern "C" {
uptr __tsan_get_current_allocated_bytes() {
- u64 stats[AllocatorStatCount];
+ uptr stats[AllocatorStatCount];
allocator()->GetStats(stats);
- u64 m = stats[AllocatorStatMalloced];
- u64 f = stats[AllocatorStatFreed];
- return m >= f ? m - f : 1;
+ return stats[AllocatorStatAllocated];
}
uptr __tsan_get_heap_size() {
- u64 stats[AllocatorStatCount];
+ uptr stats[AllocatorStatCount];
allocator()->GetStats(stats);
- u64 m = stats[AllocatorStatMmapped];
- u64 f = stats[AllocatorStatUnmapped];
- return m >= f ? m - f : 1;
+ return stats[AllocatorStatMapped];
}
uptr __tsan_get_free_bytes() {
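
With the consolidated counters, each exported accessor reduces to a single read of an already-clamped value, so it always returns at least 1. A hypothetical caller sketch, assuming the program is built with -fsanitize=thread and that size_t matches the runtime's pointer-sized uptr:

    #include <cstdio>
    #include <cstdlib>
    #include <cstring>

    // Exported by the tsan runtime (see the accessors above); declared
    // here only to keep the sketch self-contained.
    extern "C" size_t __tsan_get_current_allocated_bytes();
    extern "C" size_t __tsan_get_heap_size();

    int main() {
      size_t before = __tsan_get_current_allocated_bytes();
      void *p = malloc(1 << 20);
      memset(p, 1, 1 << 20);  // touch the memory so it is really mapped
      size_t during = __tsan_get_current_allocated_bytes();
      free(p);
      std::printf("delta while live: %zu bytes, heap size: %zu bytes\n",
                  during - before, __tsan_get_heap_size());
    }
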