[compiler-rt] ee7b629 - scudo: Don't memset previously released cached pages in the secondary allocator.
Peter Collingbourne via llvm-commits
llvm-commits at lists.llvm.org
Thu Nov 5 09:25:10 PST 2020
Author: Peter Collingbourne
Date: 2020-11-05T09:24:50-08:00
New Revision: ee7b629df27113c2752814efdec960042f799b03
URL: https://github.com/llvm/llvm-project/commit/ee7b629df27113c2752814efdec960042f799b03
DIFF: https://github.com/llvm/llvm-project/commit/ee7b629df27113c2752814efdec960042f799b03.diff
LOG: scudo: Don't memset previously released cached pages in the secondary allocator.
There is no need to memset previously released pages because they are
already zero when they are next touched. On db845c, before:
BM_stdlib_malloc_free_default/131072 34562 ns 34547 ns 20258 bytes_per_second=3.53345G/s
after:
BM_stdlib_malloc_free_default/131072 29618 ns 29589 ns 23485 bytes_per_second=4.12548G/s
Differential Revision: https://reviews.llvm.org/D90814
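For context, here is a minimal standalone sketch of the idea, not Scudo's
actual types: the cache remembers (via a Time field, where 0 means the pages
were released back to the OS) whether a block's contents are already zero,
and reports that to the caller. All names below are illustrative only.

#include <cstddef>
#include <cstdint>

// Illustrative cache entry for a large block. In this sketch, Time == 0
// means the block's pages were released back to the OS, so they will read
// back as zero the next time they are touched.
struct CachedBlock {
  void *Base = nullptr;
  std::size_t Size = 0;
  std::uint64_t Time = 0;
};

// Hand out a cached block and tell the caller whether its contents are
// already zero, so a later zero-fill can be skipped.
bool retrieve(CachedBlock &Entry, void **Ptr, std::size_t *Size,
              bool *Zeroed) {
  if (Entry.Base == nullptr)
    return false;
  *Ptr = Entry.Base;
  *Size = Entry.Size;
  *Zeroed = (Entry.Time == 0); // released pages are guaranteed zero
  Entry.Base = nullptr;        // entry is now empty
  return true;
}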
Added:
Modified:
compiler-rt/lib/scudo/standalone/secondary.h
Removed:
################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index ca86d2dd212d..eda88862cb07 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -52,7 +52,8 @@ class MapAllocatorNoCache {
public:
void initLinkerInitialized(UNUSED s32 ReleaseToOsInterval) {}
void init(UNUSED s32 ReleaseToOsInterval) {}
- bool retrieve(UNUSED uptr Size, UNUSED LargeBlock::Header **H) {
+ bool retrieve(UNUSED uptr Size, UNUSED LargeBlock::Header **H,
+ UNUSED bool *Zeroed) {
return false;
}
bool store(UNUSED LargeBlock::Header *H) { return false; }
@@ -126,7 +127,7 @@ class MapAllocatorCache {
return EntryCached;
}
- bool retrieve(uptr Size, LargeBlock::Header **H) {
+ bool retrieve(uptr Size, LargeBlock::Header **H, bool *Zeroed) {
const uptr PageSize = getPageSizeCached();
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
ScopedLock L(Mutex);
@@ -141,6 +142,7 @@ class MapAllocatorCache {
if (Size < BlockSize - PageSize * 4U)
continue;
*H = reinterpret_cast<LargeBlock::Header *>(Entries[I].Block);
+ *Zeroed = Entries[I].Time == 0;
Entries[I].Block = 0;
(*H)->BlockEnd = Entries[I].BlockEnd;
(*H)->MapBase = Entries[I].MapBase;
@@ -328,12 +330,13 @@ void *MapAllocator<CacheT>::allocate(uptr Size, uptr AlignmentHint,
if (AlignmentHint < PageSize && Cache.canCache(RoundedSize)) {
LargeBlock::Header *H;
- if (Cache.retrieve(RoundedSize, &H)) {
+ bool Zeroed;
+ if (Cache.retrieve(RoundedSize, &H, &Zeroed)) {
if (BlockEnd)
*BlockEnd = H->BlockEnd;
void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(H) +
LargeBlock::getHeaderSize());
- if (FillContents)
+ if (FillContents && !Zeroed)
memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte,
H->BlockEnd - reinterpret_cast<uptr>(Ptr));
const uptr BlockSize = H->BlockEnd - reinterpret_cast<uptr>(H);
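For reference, a self-contained sketch of the fill decision the last hunk
introduces: the memset runs only when filling was requested and the block is
not already known to be zero. The enum values and PatternFillByte constant
below are illustrative stand-ins, not Scudo's real identifiers.

#include <cstddef>
#include <cstring>

// Illustrative fill-mode enum and pattern byte; this only mirrors the
// condition added in the diff above.
enum FillMode { NoFill = 0, ZeroFill, PatternFill };
constexpr unsigned char PatternFillByte = 0xAB; // stand-in value

// Fill the block only when a fill was requested and the block is not
// already known to be zero (the Zeroed flag reported by the cache).
void fillIfNeeded(void *Ptr, std::size_t Size, FillMode FillContents,
                  bool Zeroed) {
  if (FillContents != NoFill && !Zeroed)
    std::memset(Ptr, FillContents == ZeroFill ? 0 : PatternFillByte, Size);
}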