[compiler-rt] 2fcfaee - Revert "[scudo] seperate cache retrieval logic"
Chia-hung Duan via llvm-commits
llvm-commits at lists.llvm.org
Mon Jul 31 10:38:14 PDT 2023
Author: Chia-hung Duan
Date: 2023-07-31T17:37:55Z
New Revision: 2fcfaeecfb3389decdc511ece532f905a14b2d8d
URL: https://github.com/llvm/llvm-project/commit/2fcfaeecfb3389decdc511ece532f905a14b2d8d
DIFF: https://github.com/llvm/llvm-project/commit/2fcfaeecfb3389decdc511ece532f905a14b2d8d.diff
LOG: Revert "[scudo] seperate cache retrieval logic"
RoundedSize is only meant to be used for the direct-mapping path. To
determine whether a cached block can satisfy a request, the size does
not need to be rounded up in advance. As a result, using RoundedSize
may miss some chances to reuse cached blocks.
This reverts commit 4c6b8bb87b3452d0bcef83cd0ea712d8426603b8.
Reviewed By: frs513
Differential Revision: https://reviews.llvm.org/D156583
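
To make the feasibility math concrete, below is a standalone sketch of the
placement arithmetic that retrieve() now performs on the exact request size.
The constants (PageSize, HeaderSize, the addresses) are hypothetical stand-ins;
the real code uses getPageSizeCached() and the Chunk/LargeBlock header sizes,
and additionally rejects blocks that would leave too many unused pages at the
front (MaxUnusedCachePages).

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Hypothetical stand-ins for illustration only.
constexpr uptr PageSize = 4096;
constexpr uptr HeaderSize = 64; // combined Chunk + LargeBlock headers

constexpr uptr roundUp(uptr X, uptr B) { return (X + B - 1) & ~(B - 1); }
constexpr uptr roundDown(uptr X, uptr B) { return X & ~(B - 1); }

int main() {
  const uptr Size = 2 * PageSize + 1; // exact user request
  const uptr Alignment = 32;
  const uptr CommitBase = 0x200000;   // a cached block, page aligned
  const uptr CommitSize = 3 * PageSize;

  // Direct-map sizing rounds the request (plus headers) up to pages.
  const uptr RoundedSize = roundUp(Size + HeaderSize, PageSize);

  // Cache feasibility works from the exact Size: place the allocation
  // at the aligned end of the cached block and check that the headers
  // still land inside it.
  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
  const uptr HeaderPos = AllocPos - HeaderSize;
  const bool Fits = HeaderPos >= CommitBase &&
                    HeaderPos <= CommitBase + CommitSize;

  printf("RoundedSize=%zu CommitSize=%zu Fits=%d\n", (size_t)RoundedSize,
         (size_t)CommitSize, (int)Fits);
  return 0;
}
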
Added:
Modified:
compiler-rt/lib/scudo/standalone/secondary.h
Removed:
################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index 407ee2726b2b50..d72ae9bb0c3650 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -89,8 +89,11 @@ struct CachedBlock {
template <typename Config> class MapAllocatorNoCache {
public:
void init(UNUSED s32 ReleaseToOsInterval) {}
- bool retrieve(UNUSED uptr Size, UNUSED CachedBlock &Entry) { return false; }
- void store(UNUSED const Options &Options, LargeBlock::Header *H) { unmap(H); }
+ bool retrieve(UNUSED Options Options, UNUSED uptr Size, UNUSED uptr Alignment,
+ UNUSED LargeBlock::Header **H, UNUSED bool *Zeroed) {
+ return false;
+ }
+ void store(UNUSED Options Options, LargeBlock::Header *H) { unmap(H); }
bool canCache(UNUSED uptr Size) { return false; }
void disable() {}
void enable() {}
@@ -259,9 +262,13 @@ template <typename Config> class MapAllocatorCache {
Entry.MemMap.unmap(Entry.MemMap.getBase(), Entry.MemMap.getCapacity());
}
- bool retrieve(uptr Size, CachedBlock &Entry) EXCLUDES(Mutex) {
+ bool retrieve(Options Options, uptr Size, uptr Alignment,
+ LargeBlock::Header **H, bool *Zeroed) EXCLUDES(Mutex) {
+ const uptr PageSize = getPageSizeCached();
const u32 MaxCount = atomic_load_relaxed(&MaxEntriesCount);
bool Found = false;
+ CachedBlock Entry;
+ uptr HeaderPos = 0;
{
ScopedLock L(Mutex);
if (EntriesCount == 0)
@@ -269,8 +276,18 @@ template <typename Config> class MapAllocatorCache {
for (u32 I = 0; I < MaxCount; I++) {
if (!Entries[I].isValid())
continue;
- if (Size > Entries[I].CommitSize)
+ const uptr CommitBase = Entries[I].CommitBase;
+ const uptr CommitSize = Entries[I].CommitSize;
+ const uptr AllocPos =
+ roundDown(CommitBase + CommitSize - Size, Alignment);
+ HeaderPos =
+ AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
+ if (HeaderPos > CommitBase + CommitSize)
continue;
+ if (HeaderPos < CommitBase ||
+ AllocPos > CommitBase + PageSize * MaxUnusedCachePages) {
+ continue;
+ }
Found = true;
Entry = Entries[I];
Entries[I].invalidate();
@@ -278,7 +295,29 @@ template <typename Config> class MapAllocatorCache {
break;
}
}
- return Found;
+ if (!Found)
+ return false;
+
+ *H = reinterpret_cast<LargeBlock::Header *>(
+ LargeBlock::addHeaderTag<Config>(HeaderPos));
+ *Zeroed = Entry.Time == 0;
+ if (useMemoryTagging<Config>(Options))
+ Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
+ uptr NewBlockBegin = reinterpret_cast<uptr>(*H + 1);
+ if (useMemoryTagging<Config>(Options)) {
+ if (*Zeroed) {
+ storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
+ NewBlockBegin);
+ } else if (Entry.BlockBegin < NewBlockBegin) {
+ storeTags(Entry.BlockBegin, NewBlockBegin);
+ } else {
+ storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
+ }
+ }
+ (*H)->CommitBase = Entry.CommitBase;
+ (*H)->CommitSize = Entry.CommitSize;
+ (*H)->MemMap = Entry.MemMap;
+ return true;
}
bool canCache(uptr Size) {
@@ -443,27 +482,6 @@ template <typename Config> class MapAllocator {
}
}
- inline void setHeader(const Options &Options, CachedBlock &Entry,
- LargeBlock::Header *H, bool &Zeroed) {
- Zeroed = Entry.Time == 0;
- if (useMemoryTagging<Config>(Options)) {
- Entry.MemMap.setMemoryPermission(Entry.CommitBase, Entry.CommitSize, 0);
- // Block begins after the LargeBlock::Header
- uptr NewBlockBegin = reinterpret_cast<uptr>(H + 1);
- if (Zeroed) {
- storeTags(LargeBlock::addHeaderTag<Config>(Entry.CommitBase),
- NewBlockBegin);
- } else if (Entry.BlockBegin < NewBlockBegin) {
- storeTags(Entry.BlockBegin, NewBlockBegin);
- } else {
- storeTags(untagPointer(NewBlockBegin), untagPointer(Entry.BlockBegin));
- }
- }
- H->CommitBase = Entry.CommitBase;
- H->CommitSize = Entry.CommitSize;
- H->MemMap = Entry.MemMap;
- }
-
bool canCache(uptr Size) { return Cache.canCache(Size); }
bool setOption(Option O, sptr Value) { return Cache.setOption(O, Value); }
@@ -518,15 +536,7 @@ void *MapAllocator<Config>::allocate(const Options &Options, uptr Size,
if (Alignment < PageSize && Cache.canCache(RoundedSize)) {
LargeBlock::Header *H;
bool Zeroed;
- CachedBlock Entry;
- if (Cache.retrieve(RoundedSize, Entry)) {
- const uptr AllocPos =
- roundDown(Entry.CommitBase + Entry.CommitSize - Size, Alignment);
- const uptr HeaderPos =
- AllocPos - LargeBlock::getHeaderSize() - Chunk::getHeaderSize();
- H = reinterpret_cast<LargeBlock::Header *>(
- LargeBlock::addHeaderTag<Config>(HeaderPos));
- setHeader(Options, Entry, H, Zeroed);
+ if (Cache.retrieve(Options, Size, Alignment, &H, &Zeroed)) {
const uptr BlockEnd = H->CommitBase + H->CommitSize;
if (BlockEndPtr)
*BlockEndPtr = BlockEnd;
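
One subtlety the inlined retrieve() now owns is the memory-tagging path: after
reusing a cached block, the tags between the old and new block positions must
be made consistent. The following is a simplified model of just the range
selection among the three storeTags cases; it is not the scudo source (the
real code calls storeTags()/untagPointer() and applies header tags via
LargeBlock::addHeaderTag<Config>()):

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

struct Range {
  uptr Begin, End;
  bool UseUntaggedPointers;
};

// Mirrors the three cases in retrieve():
//  - Zeroed (Entry.Time == 0): freshly committed memory, so tag the whole
//    span from CommitBase up to the new block begin.
//  - Old block begin precedes the new one: extend the tags over the gap.
//  - Old block begin follows the new one: the span in between carries
//    stale tags; the real code untags both ends before retagging.
Range selectRetagRange(bool Zeroed, uptr CommitBase, uptr OldBlockBegin,
                       uptr NewBlockBegin) {
  if (Zeroed)
    return {CommitBase, NewBlockBegin, false};
  if (OldBlockBegin < NewBlockBegin)
    return {OldBlockBegin, NewBlockBegin, false};
  return {NewBlockBegin, OldBlockBegin, true};
}

int main() {
  // Hypothetical addresses: the new block begins past the old one, so
  // only the gap between them needs fresh tags.
  const Range R = selectRetagRange(/*Zeroed=*/false, 0x200000, 0x200040,
                                   0x200080);
  printf("retag [%#zx, %#zx) untagged=%d\n", (size_t)R.Begin, (size_t)R.End,
         (int)R.UseUntaggedPointers);
  return 0;
}
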