[PATCH] D33454: [sanitizer] Change the 32-bit Primary AllocateRegion to reduce fragmentation
Kostya Kortchinsky via Phabricator via llvm-commits
llvm-commits at lists.llvm.org
Wed May 24 09:25:00 PDT 2017
cryptoad updated this revision to Diff 100113.
cryptoad added a comment.
As per review feedback, move the new code into its own function.
Additionally, change the top file comment to reflect the new way of doing
things.
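
For readers following the thread, here is a rough standalone sketch of the
scheme the patch adopts. This is illustrative only: it is written against
plain POSIX mmap/munmap with made-up constants (the kRegionSize and
kMaxStashedRegions values below are placeholders), and it omits the mutex
that the real code takes around the stash:

  #include <sys/mman.h>
  #include <cstdint>
  #include <cstdio>

  static const uintptr_t kRegionSize = 1 << 20;   // example value only
  static const int kMaxStashedRegions = 8;
  static uintptr_t regions_stash[kMaxStashedRegions];
  static int num_stashed_regions = 0;  // the real code guards this with a SpinMutex

  static uintptr_t AllocateRegionSketch() {
    // Fast path: reuse a previously stashed aligned region.
    if (num_stashed_regions > 0)
      return regions_stash[--num_stashed_regions];
    // Slow path: map 2 * kRegionSize so an aligned region is guaranteed to fit.
    uintptr_t map_size = 2 * kRegionSize;
    void *p = mmap(nullptr, map_size, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (p == MAP_FAILED) return 0;
    uintptr_t map_res = (uintptr_t)p, map_end = map_res + map_size;
    uintptr_t res = map_res;
    bool extra_region = false;
    if ((res & (kRegionSize - 1)) == 0) {
      // Already aligned: keep the second half for a later request if there is room.
      if (num_stashed_regions < kMaxStashedRegions) {
        regions_stash[num_stashed_regions++] = res + kRegionSize;
        extra_region = true;
      }
    } else {
      // Not aligned: round up to the next region boundary and unmap the head.
      res = (map_res + kRegionSize - 1) & ~(kRegionSize - 1);
      munmap((void *)map_res, res - map_res);
    }
    if (!extra_region) {
      // Nothing was stashed: unmap whatever lies past the region we keep.
      uintptr_t end = res + kRegionSize;
      munmap((void *)end, map_end - end);
    }
    return res;
  }

  int main() {
    uintptr_t r1 = AllocateRegionSketch();
    uintptr_t r2 = AllocateRegionSketch();  // often served straight from the stash
    printf("r1=%#lx r2=%#lx\n", (unsigned long)r1, (unsigned long)r2);
    return 0;
  }

Compared to MmapAlignedOrDie, which always over-allocates and immediately
returns the excess, the aligned case above keeps the second half around for
the next region request, so fewer trimmed tails end up scattered through the
32-bit address space.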
https://reviews.llvm.org/D33454
Files:
lib/sanitizer_common/sanitizer_allocator_primary32.h
Index: lib/sanitizer_common/sanitizer_allocator_primary32.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_primary32.h
+++ lib/sanitizer_common/sanitizer_allocator_primary32.h
@@ -24,7 +24,7 @@
 // be returned by MmapOrDie().
 //
 // Region:
-//   a result of a single call to MmapAlignedOrDie(kRegionSize, kRegionSize).
+//   a result of an allocation of kRegionSize bytes aligned on kRegionSize.
 // Since the regions are aligned by kRegionSize, there are exactly
 // kNumPossibleRegions possible regions in the address space and so we keep
 // a ByteMap possible_regions to store the size classes of each Region.
@@ -106,6 +106,7 @@
   void Init(s32 release_to_os_interval_ms) {
     possible_regions.TestOnlyInit();
     internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
+    num_stashed_regions = 0;
   }
 
   s32 ReleaseToOSIntervalMs() const {
@@ -275,13 +276,51 @@
     return mem & ~(kRegionSize - 1);
   }
 
+  // Allocates a region of kRegionSize bytes, aligned on kRegionSize, by first
+  // allocating 2 * kRegionSize. If the result of the initial allocation is
+  // aligned, split it in two, and attempt to store the second part into a
+  // stash. In the event the stash is full, just unmap the superfluous memory.
+  // If the initial allocation is not aligned, trim the memory before and after.
+  uptr AllocateRegionSlow(AllocatorStats *stat) {
+    uptr map_size = 2 * kRegionSize;
+    uptr map_res = (uptr)MmapOrDie(map_size, "SizeClassAllocator32");
+    uptr map_end = map_res + map_size;
+    uptr res = map_res;
+    bool extra_region = false;
+    if (IsAligned(res, kRegionSize)) {
+      // We are aligned, attempt to stash the second half.
+      SpinMutexLock l(&regions_stash_mutex);
+      if (num_stashed_regions < kMaxStashedRegions) {
+        regions_stash[num_stashed_regions++] = res + kRegionSize;
+        extra_region = true;
+      }
+    } else {
+      // We are not aligned, trim the memory in front of us.
+      res = (map_res + kRegionSize - 1) & ~(kRegionSize - 1);
+      UnmapOrDie((void*)map_res, res - map_res);
+    }
+    if (!extra_region) {
+      // We have superfluous memory behind us, trim it.
+      uptr end = res + kRegionSize;
+      UnmapOrDie((void*)end, map_end - end);
+      map_size -= kRegionSize;
+    }
+    MapUnmapCallback().OnMap(res, map_size);
+    stat->Add(AllocatorStatMapped, map_size);
+    return res;
+  }
+
   uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
     CHECK_LT(class_id, kNumClasses);
-    uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
-                                                       "SizeClassAllocator32"));
-    MapUnmapCallback().OnMap(res, kRegionSize);
-    stat->Add(AllocatorStatMapped, kRegionSize);
-    CHECK_EQ(0U, (res & (kRegionSize - 1)));
+    uptr res = 0;
+    {
+      SpinMutexLock l(&regions_stash_mutex);
+      if (num_stashed_regions > 0)
+        res = regions_stash[--num_stashed_regions];
+    }
+    if (!res)
+      res = AllocateRegionSlow(stat);
+    CHECK(IsAligned(res, kRegionSize));
     possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
     return res;
   }
@@ -316,8 +355,11 @@
     }
   }
 
+  static const uptr kMaxStashedRegions = 8;
+  SpinMutex regions_stash_mutex;
+  uptr num_stashed_regions;
+  uptr regions_stash[kMaxStashedRegions];
+
   ByteMap possible_regions;
   SizeClassInfo size_class_info_array[kNumClasses];
 };
-
-
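
As a side note on why the CHECK(IsAligned(res, kRegionSize)) above matters:
the header comment relies on regions being kRegionSize-aligned, so a region
id is just the high bits of any address inside the region, which is what lets
possible_regions be a flat ByteMap. A small standalone illustration with
made-up numbers (the real kRegionSize and space size come from the allocator
parameters, not from this sketch):

  #include <cassert>
  #include <cstdint>
  #include <cstdio>

  int main() {
    const uint64_t kSpaceSize = 1ULL << 32;   // full 32-bit address space
    const uint64_t kRegionSize = 1ULL << 20;  // 1 MiB regions, example value
    const uint64_t kNumPossibleRegions = kSpaceSize / kRegionSize;  // 4096

    uint64_t region_beg = 37 * kRegionSize;   // some aligned region
    uint64_t ptr = region_beg + 12345;        // an address inside it

    uint64_t region_id = ptr / kRegionSize;   // high bits of the address
    assert(region_id < kNumPossibleRegions);
    assert((ptr & ~(kRegionSize - 1)) == region_beg);  // same math as ComputeRegionBeg
    printf("region %llu of %llu possible\n",
           (unsigned long long)region_id, (unsigned long long)kNumPossibleRegions);
    return 0;
  }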