[compiler-rt] r305404 - [sanitizer] Reverting D34152
Kostya Kortchinsky via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 14 10:32:26 PDT 2017
Author: cryptoad
Date: Wed Jun 14 12:32:26 2017
New Revision: 305404
URL: http://llvm.org/viewvc/llvm-project?rev=305404&view=rev
Log:
[sanitizer] Reverting D34152
Summary:
This broke thread_local_quarantine_pthread_join.cc on some architectures, due
to the overhead of the stashed regions. Reverting while figuring out the best
way to deal with it.
Reviewers: alekseyshl
Reviewed By: alekseyshl
Subscribers: llvm-commits, kubamracek
Differential Revision: https://reviews.llvm.org/D34213
Modified:
compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h
compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h
compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix.cc
compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc
compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_common_test.cc
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h?rev=305404&r1=305403&r2=305404&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h Wed Jun 14 12:32:26 2017
@@ -24,7 +24,7 @@ template<class SizeClassAllocator> struc
// be returned by MmapOrDie().
//
// Region:
-// a result of an allocation of kRegionSize bytes aligned on kRegionSize.
+// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
@@ -106,7 +106,6 @@ class SizeClassAllocator32 {
void Init(s32 release_to_os_interval_ms) {
possible_regions.TestOnlyInit();
internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
- num_stashed_regions = 0;
}
s32 ReleaseToOSIntervalMs() const {
@@ -276,49 +275,15 @@ class SizeClassAllocator32 {
return mem & ~(kRegionSize - 1);
}
- // Allocates a region of kRegionSize bytes, aligned on kRegionSize. If we get
- // more than one region back (in the event the allocation is aligned on the
- // first try), attempt to store the second region into a stash. If the stash
- // is full, just unmap the superfluous memory.
- uptr AllocateRegionSlow(AllocatorStats *stat) {
- uptr map_size = kRegionSize;
- uptr padding_chunk;
- uptr region = reinterpret_cast<uptr>(
- MmapAlignedOrDie(kRegionSize, kRegionSize, "SizeClassAllocator32",
- &padding_chunk));
- if (padding_chunk) {
- // We have an extra region, attempt to stash it.
- CHECK_EQ(padding_chunk, region + kRegionSize);
- bool trim_extra = true;
- {
- SpinMutexLock l(&regions_stash_mutex);
- if (num_stashed_regions < kMaxStashedRegions) {
- regions_stash[num_stashed_regions++] = padding_chunk;
- map_size = 2 * kRegionSize;
- trim_extra = false;
- }
- }
- if (trim_extra)
- UnmapOrDie((void*)padding_chunk, kRegionSize);
- }
- MapUnmapCallback().OnMap(region, map_size);
- stat->Add(AllocatorStatMapped, map_size);
- return region;
- }
-
uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
CHECK_LT(class_id, kNumClasses);
- uptr region = 0;
- {
- SpinMutexLock l(&regions_stash_mutex);
- if (num_stashed_regions > 0)
- region = regions_stash[--num_stashed_regions];
- }
- if (!region)
- region = AllocateRegionSlow(stat);
- CHECK(IsAligned(region, kRegionSize));
- possible_regions.set(ComputeRegionId(region), static_cast<u8>(class_id));
- return region;
+ uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+ "SizeClassAllocator32"));
+ MapUnmapCallback().OnMap(res, kRegionSize);
+ stat->Add(AllocatorStatMapped, kRegionSize);
+ CHECK_EQ(0U, (res & (kRegionSize - 1)));
+ possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
+ return res;
}
SizeClassInfo *GetSizeClassInfo(uptr class_id) {
@@ -351,13 +316,6 @@ class SizeClassAllocator32 {
}
}
- // Unless several threads request regions simultaneously from different size
- // classes, the stash rarely contains more than 1 entry.
- static const uptr kMaxStashedRegions = 8;
- SpinMutex regions_stash_mutex;
- uptr num_stashed_regions;
- uptr regions_stash[kMaxStashedRegions];
-
ByteMap possible_regions;
SizeClassInfo size_class_info_array[kNumClasses];
};
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h?rev=305404&r1=305403&r2=305404&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_common.h Wed Jun 14 12:32:26 2017
@@ -92,15 +92,7 @@ void *MmapFixedOrDie(uptr fixed_addr, up
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
-// Since the predominant use case of this function is "size == alignment" and
-// the nature of the way the alignment requirement is satisfied (by allocating
-// size+alignment bytes of memory), there's a potential of address space
-// fragmentation. The padding_chunk parameter provides the opportunity to
-// return the contiguous padding of "size" bytes of the allocated chunk if the
-// initial allocation happened to be perfectly aligned and the platform supports
-// partial unmapping of the mapped region.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type,
- uptr *padding_chunk);
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Disallow access to a memory range. Use MmapFixedNoAccess to allocate an
// unaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix.cc?rev=305404&r1=305403&r2=305404&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_posix.cc Wed Jun 14 12:32:26 2017
@@ -146,29 +146,22 @@ void UnmapOrDie(void *addr, uptr size) {
}
// We want to map a chunk of address space aligned to 'alignment'.
-// We do it by mapping a bit more and then unmapping redundant pieces.
+// We do it by maping a bit more and then unmaping redundant pieces.
// We probably can do it with fewer syscalls in some OS-dependent way.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type,
- uptr* padding_chunk) {
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment));
uptr map_size = size + alignment;
uptr map_res = (uptr)MmapOrDie(map_size, mem_type);
uptr map_end = map_res + map_size;
- bool is_aligned = IsAligned(map_res, alignment);
- if (is_aligned && padding_chunk && size == alignment) {
- *padding_chunk = map_res + size;
- return (void *)map_res;
- }
- if (padding_chunk)
- *padding_chunk = 0;
uptr res = map_res;
- if (!is_aligned) {
- res = (map_res + alignment - 1) & ~(alignment - 1);
- UnmapOrDie((void*)map_res, res - map_res);
- }
+ if (res & (alignment - 1)) // Not aligned.
+ res = (map_res + alignment) & ~(alignment - 1);
uptr end = res + size;
- UnmapOrDie((void*)end, map_end - end);
+ if (res != map_res)
+ UnmapOrDie((void*)map_res, res - map_res);
+ if (end != map_end)
+ UnmapOrDie((void*)end, map_end - end);
return (void*)res;
}
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc?rev=305404&r1=305403&r2=305404&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_win.cc Wed Jun 14 12:32:26 2017
@@ -132,14 +132,10 @@ void UnmapOrDie(void *addr, uptr size) {
}
// We want to map a chunk of address space aligned to 'alignment'.
-void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type,
- uptr *padding_chunk) {
+void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type) {
CHECK(IsPowerOfTwo(size));
CHECK(IsPowerOfTwo(alignment));
- if (padding_chunk)
- *padding_chunk = 0;
-
// Windows will align our allocations to at least 64K.
alignment = Max(alignment, GetMmapGranularity());
Modified: compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_common_test.cc
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_common_test.cc?rev=305404&r1=305403&r2=305404&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_common_test.cc (original)
+++ compiler-rt/trunk/lib/sanitizer_common/tests/sanitizer_common_test.cc Wed Jun 14 12:32:26 2017
@@ -77,45 +77,14 @@ TEST(SanitizerCommon, MmapAlignedOrDie)
for (uptr size = 1; size <= 32; size *= 2) {
for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
for (int iter = 0; iter < 100; iter++) {
- uptr res = (uptr)MmapAlignedOrDie(size * PageSize, alignment * PageSize,
- "MmapAlignedOrDieTest", nullptr);
+ uptr res = (uptr)MmapAlignedOrDie(
+ size * PageSize, alignment * PageSize, "MmapAlignedOrDieTest");
EXPECT_EQ(0U, res % (alignment * PageSize));
internal_memset((void*)res, 1, size * PageSize);
UnmapOrDie((void*)res, size * PageSize);
}
}
}
-}
-
-TEST(SanitizerCommon, MmapAlignedOrDiePaddingChunk) {
- uptr PageSize = GetPageSizeCached();
- for (uptr size = 1; size <= 32; size *= 2) {
- for (uptr alignment = 1; alignment <= 32; alignment *= 2) {
- for (int iter = 0; iter < 100; iter++) {
- uptr padding_chunk;
- uptr res = (uptr)MmapAlignedOrDie(size * PageSize, alignment * PageSize,
- "MmapAlignedOrDiePaddingChunkTest", &padding_chunk);
- EXPECT_EQ(0U, res % (alignment * PageSize));
- internal_memset((void*)res, 1, size * PageSize);
- UnmapOrDie((void*)res, size * PageSize);
- if (SANITIZER_WINDOWS || (size != alignment)) {
- // Not supported on Windows or for different size and alignment.
- EXPECT_EQ(0U, padding_chunk);
- continue;
- }
- if (size == 1 && alignment == 1) {
- // mmap returns PageSize aligned chunks, so this is a specific case
- // where we can check that padding_chunk will never be 0.
- EXPECT_NE(0U, padding_chunk);
- }
- if (padding_chunk) {
- EXPECT_EQ(res + size * PageSize, padding_chunk);
- internal_memset((void*)padding_chunk, 1, alignment * PageSize);
- UnmapOrDie((void*)padding_chunk, alignment * PageSize);
- }
- }
- }
- }
}
#if SANITIZER_LINUX
More information about the llvm-commits
mailing list