[compiler-rt] r303883 - [sanitizer] Revert rL303879 as it breaks Windows
Kostya Kortchinsky via llvm-commits
llvm-commits at lists.llvm.org
Thu May 25 09:54:44 PDT 2017
Author: cryptoad
Date: Thu May 25 11:54:44 2017
New Revision: 303883
URL: http://llvm.org/viewvc/llvm-project?rev=303883&view=rev
Log:
[sanitizer] Revert rL303879 as it breaks Windows
Summary:
Apparently `UnmapOrDie` doesn't support partial unmapping on Windows, which
makes the new region allocation technique incompatible with Windows (see the
sketch below).
Reviewers: alekseyshl, dvyukov
Reviewed By: alekseyshl
Subscribers: llvm-commits, kubamracek
Differential Revision: https://reviews.llvm.org/D33554
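
For context, here is a minimal sketch of the alignment trick the reverted
change relied on, written against POSIX mmap/munmap. The helper name
MapAlignedPosix is made up for illustration; it is not a compiler-rt
function. The partial munmap() calls that trim the misaligned head and tail
are exactly the step Windows cannot express: VirtualFree with MEM_RELEASE
only accepts the base address originally returned by VirtualAlloc together
with a dwSize of 0, so a subrange of a reservation cannot be returned on its
own.

  #include <sys/mman.h>
  #include <cstddef>
  #include <cstdint>

  // Map `size` bytes aligned on `alignment` (both assumed to be multiples
  // of the page size) by over-allocating and trimming the excess, as the
  // reverted AllocateRegionSlow did on its non-stash path.
  void *MapAlignedPosix(size_t size, size_t alignment) {
    size_t map_size = size + alignment;
    char *map = static_cast<char *>(
        mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0));
    if (map == MAP_FAILED) return nullptr;
    char *aligned = reinterpret_cast<char *>(
        (reinterpret_cast<uintptr_t>(map) + alignment - 1) &
        ~static_cast<uintptr_t>(alignment - 1));
    // Partial unmaps: legal on POSIX, not expressible via VirtualFree.
    if (aligned != map)
      munmap(map, aligned - map);
    if (aligned + size != map + map_size)
      munmap(aligned + size, (map + map_size) - (aligned + size));
    return aligned;
  }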
Modified:
compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h
Modified: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h
URL: http://llvm.org/viewvc/llvm-project/compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h?rev=303883&r1=303882&r2=303883&view=diff
==============================================================================
--- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h (original)
+++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_primary32.h Thu May 25 11:54:44 2017
@@ -24,7 +24,7 @@ template<class SizeClassAllocator> struc
// be returned by MmapOrDie().
//
// Region:
-// a result of an allocation of kRegionSize bytes aligned on kRegionSize.
+// a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
@@ -106,7 +106,6 @@ class SizeClassAllocator32 {
void Init(s32 release_to_os_interval_ms) {
possible_regions.TestOnlyInit();
internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
- num_stashed_regions = 0;
}
s32 ReleaseToOSIntervalMs() const {
@@ -276,52 +275,15 @@ class SizeClassAllocator32 {
return mem & ~(kRegionSize - 1);
}
- // Allocates a region of kRegionSize bytes, aligned on kRegionSize, by first
- // allocating 2 * kRegionSize. If the result of the initial allocation is
- // aligned, split it in two, and attempt to store the second part into a
- // stash. In the event the stash is full, just unmap the superfluous memory.
- // If the initial allocation is not aligned, trim the memory before and after.
- uptr AllocateRegionSlow(AllocatorStats *stat) {
- uptr map_size = 2 * kRegionSize;
- uptr map_res = (uptr)MmapOrDie(map_size, "SizeClassAllocator32");
- uptr region = map_res;
- bool trim_region = true;
- if (IsAligned(region, kRegionSize)) {
- // We are aligned, attempt to stash the second half.
- SpinMutexLock l(&regions_stash_mutex);
- if (num_stashed_regions < kMaxStashedRegions) {
- regions_stash[num_stashed_regions++] = region + kRegionSize;
- trim_region = false;
- }
- }
- // Trim the superfluous memory in front and behind us.
- if (trim_region) {
- // If map_res is already aligned on kRegionSize (in the event of a full
- // stash), the following two lines amount to a no-op.
- region = (map_res + kRegionSize - 1) & ~(kRegionSize - 1);
- UnmapOrDie((void*)map_res, region - map_res);
- uptr end = region + kRegionSize;
- UnmapOrDie((void*)end, map_res + map_size - end);
- map_size = kRegionSize;
- }
- MapUnmapCallback().OnMap(region, map_size);
- stat->Add(AllocatorStatMapped, map_size);
- return region;
- }
-
uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
CHECK_LT(class_id, kNumClasses);
- uptr region = 0;
- {
- SpinMutexLock l(&regions_stash_mutex);
- if (num_stashed_regions > 0)
- region = regions_stash[--num_stashed_regions];
- }
- if (!region)
- region = AllocateRegionSlow(stat);
- CHECK(IsAligned(region, kRegionSize));
- possible_regions.set(ComputeRegionId(region), static_cast<u8>(class_id));
- return region;
+ uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
+ "SizeClassAllocator32"));
+ MapUnmapCallback().OnMap(res, kRegionSize);
+ stat->Add(AllocatorStatMapped, kRegionSize);
+ CHECK_EQ(0U, (res & (kRegionSize - 1)));
+ possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
+ return res;
}
SizeClassInfo *GetSizeClassInfo(uptr class_id) {
@@ -354,13 +316,6 @@ class SizeClassAllocator32 {
}
}
- // Unless several threads request regions simultaneously from different size
- // classes, the stash rarely contains more than 1 entry.
- static const uptr kMaxStashedRegions = 8;
- SpinMutex regions_stash_mutex;
- uptr num_stashed_regions;
- uptr regions_stash[kMaxStashedRegions];
-
ByteMap possible_regions;
SizeClassInfo size_class_info_array[kNumClasses];
};
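
For completeness, a Windows-friendly way to get the same alignment guarantee
is to release the whole over-sized reservation and retry at the computed
aligned address, accepting that another thread can race for the range. The
sketch below is purely illustrative (MapAlignedWindows is a made-up name and
this is not necessarily what MmapAlignedOrDie or any follow-up patch does);
it only uses documented VirtualAlloc/VirtualFree semantics.

  #include <windows.h>
  #include <cstdint>

  void *MapAlignedWindows(size_t size, size_t alignment) {
    for (;;) {
      // Reserve an over-sized block just to learn where an aligned
      // sub-block would land.
      char *probe = static_cast<char *>(VirtualAlloc(
          nullptr, size + alignment, MEM_RESERVE, PAGE_NOACCESS));
      if (!probe) return nullptr;
      uintptr_t aligned =
          (reinterpret_cast<uintptr_t>(probe) + alignment - 1) &
          ~static_cast<uintptr_t>(alignment - 1);
      // MEM_RELEASE takes only the original base and dwSize == 0, so the
      // whole reservation must go before re-mapping the aligned address.
      VirtualFree(probe, 0, MEM_RELEASE);
      void *res = VirtualAlloc(reinterpret_cast<void *>(aligned), size,
                               MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
      if (res) return res;
      // Another thread may have grabbed the range in between; retry.
    }
  }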