[compiler-rt] 5f91c7b - [scudo][standalone] Allow setting release to OS
Kostya Kortchinsky via llvm-commits
llvm-commits at lists.llvm.org
Fri Feb 14 12:57:48 PST 2020
Author: Christopher Ferris
Date: 2020-02-14T12:57:34-08:00
New Revision: 5f91c7b9805729e94ff7e54f3f85e525f6f2427c
URL: https://github.com/llvm/llvm-project/commit/5f91c7b9805729e94ff7e54f3f85e525f6f2427c
DIFF: https://github.com/llvm/llvm-project/commit/5f91c7b9805729e94ff7e54f3f85e525f6f2427c.diff
LOG: [scudo][standalone] Allow setting release to OS
Summary:
Add a method to set the release to OS value as the system runs, and allow
it to be set differently in the primary and the secondary. Also, add a
default value to use for the primary and the secondary, which allows
Android to have a different default for each.
Update mallopt to support setting the release to OS value.
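As a rough usage sketch (not part of this patch): on Android, the mallopt()
M_DECAY_TIME parameter now forwards to the allocator's new
setOption(Option::ReleaseInterval, ...) backend, with 0 mapped to INT32_MIN and
any non-zero value mapped to INT32_MAX before the primary/secondary clamp the
result to their configured bounds. The helper name below is hypothetical.

  #include <malloc.h>  // mallopt(), M_DECAY_TIME (Bionic)

  // Hypothetical helper: toggle delayed release of unused pages to the OS.
  void setDelayedRelease(bool Enable) {
    // 0 is rewritten to INT32_MIN (clamped up to MinReleaseToOsIntervalMs);
    // any non-zero value is rewritten to INT32_MAX (clamped down to
    // MaxReleaseToOsIntervalMs).
    mallopt(M_DECAY_TIME, Enable ? 1 : 0);
  }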
Reviewers: pcc, cryptoad
Reviewed By: cryptoad
Subscribers: cryptoad, jfb, #sanitizers, llvm-commits
Tags: #sanitizers, #llvm
Differential Revision: https://reviews.llvm.org/D74448
Added:
Modified:
compiler-rt/lib/scudo/standalone/allocator_config.h
compiler-rt/lib/scudo/standalone/combined.h
compiler-rt/lib/scudo/standalone/flags.inc
compiler-rt/lib/scudo/standalone/primary32.h
compiler-rt/lib/scudo/standalone/primary64.h
compiler-rt/lib/scudo/standalone/secondary.h
compiler-rt/lib/scudo/standalone/wrappers_c.inc
Removed:
################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.h b/compiler-rt/lib/scudo/standalone/allocator_config.h
index 3d338501ae4a..ad2a17ef7014 100644
--- a/compiler-rt/lib/scudo/standalone/allocator_config.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -40,15 +40,15 @@ struct AndroidConfig {
using SizeClassMap = AndroidSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
// 256MB regions
- typedef SizeClassAllocator64<SizeClassMap, 28U,
+ typedef SizeClassAllocator64<SizeClassMap, 28U, 1000, 1000,
/*MaySupportMemoryTagging=*/true>
Primary;
#else
// 256KB regions
- typedef SizeClassAllocator32<SizeClassMap, 18U> Primary;
+ typedef SizeClassAllocator32<SizeClassMap, 18U, 1000, 1000> Primary;
#endif
// Cache blocks up to 2MB
- typedef MapAllocator<MapAllocatorCache<32U, 2UL << 20>> Secondary;
+ typedef MapAllocator<MapAllocatorCache<32U, 2UL << 20, 0, 1000>> Secondary;
template <class A>
using TSDRegistryT = TSDRegistrySharedT<A, 2U>; // Shared, max 2 TSDs.
};
@@ -57,12 +57,12 @@ struct AndroidSvelteConfig {
using SizeClassMap = SvelteSizeClassMap;
#if SCUDO_CAN_USE_PRIMARY64
// 128MB regions
- typedef SizeClassAllocator64<SizeClassMap, 27U> Primary;
+ typedef SizeClassAllocator64<SizeClassMap, 27U, 1000, 1000> Primary;
#else
// 64KB regions
- typedef SizeClassAllocator32<SizeClassMap, 16U> Primary;
+ typedef SizeClassAllocator32<SizeClassMap, 16U, 1000, 1000> Primary;
#endif
- typedef MapAllocator<MapAllocatorCache<4U, 1UL << 18>> Secondary;
+ typedef MapAllocator<MapAllocatorCache<4U, 1UL << 18, 0, 0>> Secondary;
template <class A>
using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD.
};
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index e8390a7b44f1..f49fc9aac84c 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -32,6 +32,8 @@ extern "C" inline void EmptyCallback() {}
namespace scudo {
+enum class Option { ReleaseInterval };
+
template <class Params, void (*PostInitCallback)(void) = EmptyCallback>
class Allocator {
public:
@@ -624,8 +626,14 @@ class Allocator {
return Options.MayReturnNull;
}
- // TODO(kostyak): implement this as a "backend" to mallopt.
- bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; }
+ bool setOption(Option O, sptr Value) {
+ if (O == Option::ReleaseInterval) {
+ Primary.setReleaseToOsIntervalMs(static_cast<s32>(Value));
+ Secondary.setReleaseToOsIntervalMs(static_cast<s32>(Value));
+ return true;
+ }
+ return false;
+ }
// Return the usable size for a given chunk. Technically we lie, as we just
// report the actual size of a chunk. This is done to counteract code actively
diff --git a/compiler-rt/lib/scudo/standalone/flags.inc b/compiler-rt/lib/scudo/standalone/flags.inc
index 27aa969e608a..342af1c79ad6 100644
--- a/compiler-rt/lib/scudo/standalone/flags.inc
+++ b/compiler-rt/lib/scudo/standalone/flags.inc
@@ -45,6 +45,6 @@ SCUDO_FLAG(bool, may_return_null, true,
"returning NULL in otherwise non-fatal error scenarios, eg: OOM, "
"invalid allocation alignments, etc.")
-SCUDO_FLAG(int, release_to_os_interval_ms, SCUDO_ANDROID ? 1000 : 5000,
+SCUDO_FLAG(int, release_to_os_interval_ms, SCUDO_ANDROID ? INT32_MIN : 5000,
"Interval (in milliseconds) at which to attempt release of unused "
"memory to the OS. Negative values disable the feature.")
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index 294043930e86..79345cb348b6 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -38,14 +38,18 @@ namespace scudo {
// Memory used by this allocator is never unmapped but can be partially
// reclaimed if the platform allows for it.
-template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
+template <class SizeClassMapT, uptr RegionSizeLog,
+ s32 MinReleaseToOsIntervalMs = INT32_MIN,
+ s32 MaxReleaseToOsIntervalMs = INT32_MAX> class SizeClassAllocator32 {
public:
typedef SizeClassMapT SizeClassMap;
// The bytemap can only track UINT8_MAX - 1 classes.
static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
// Regions should be large enough to hold the largest Block.
static_assert((1UL << RegionSizeLog) >= SizeClassMap::MaxSize, "");
- typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
+ typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog,
+ MinReleaseToOsIntervalMs,
+ MaxReleaseToOsIntervalMs> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
typedef typename CacheT::TransferBatch TransferBatch;
static const bool SupportsMemoryTagging = false;
@@ -78,7 +82,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
Sci->CanRelease = (I != SizeClassMap::BatchClassId) &&
(getSizeByClassId(I) >= (PageSize / 32));
}
- ReleaseToOsIntervalMs = ReleaseToOsInterval;
+ setReleaseToOsIntervalMs(ReleaseToOsInterval);
}
void init(s32 ReleaseToOsInterval) {
memset(this, 0, sizeof(*this));
@@ -176,6 +180,15 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
getStats(Str, I, 0);
}
+ void setReleaseToOsIntervalMs(s32 Interval) {
+ if (Interval >= MaxReleaseToOsIntervalMs) {
+ Interval = MaxReleaseToOsIntervalMs;
+ } else if (Interval <= MinReleaseToOsIntervalMs) {
+ Interval = MinReleaseToOsIntervalMs;
+ }
+ atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+ }
+
uptr releaseToOS() {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
@@ -356,6 +369,10 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
}
+ s32 getReleaseToOsIntervalMs() {
+ return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
+ }
+
NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
bool Force = false) {
const uptr BlockSize = getSizeByClassId(ClassId);
@@ -374,7 +391,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
}
if (!Force) {
- const s32 IntervalMs = ReleaseToOsIntervalMs;
+ const s32 IntervalMs = getReleaseToOsIntervalMs();
if (IntervalMs < 0)
return 0;
if (Sci->ReleaseInfo.LastReleaseAtNs +
@@ -414,7 +431,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
// through the whole NumRegions.
uptr MinRegionIndex;
uptr MaxRegionIndex;
- s32 ReleaseToOsIntervalMs;
+ atomic_s32 ReleaseToOsIntervalMs;
// Unless several threads request regions simultaneously from different size
// classes, the stash rarely contains more than 1 entry.
static constexpr uptr MaxStashedRegions = 4;
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index 9d8dcac6562a..bc31db88ebb8 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -40,11 +40,15 @@ namespace scudo {
// released if the platform allows for it.
template <class SizeClassMapT, uptr RegionSizeLog,
+ s32 MinReleaseToOsIntervalMs = INT32_MIN,
+ s32 MaxReleaseToOsIntervalMs = INT32_MAX,
bool MaySupportMemoryTagging = false>
class SizeClassAllocator64 {
public:
typedef SizeClassMapT SizeClassMap;
typedef SizeClassAllocator64<SizeClassMap, RegionSizeLog,
+ MinReleaseToOsIntervalMs,
+ MaxReleaseToOsIntervalMs,
MaySupportMemoryTagging>
ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
@@ -90,7 +94,7 @@ class SizeClassAllocator64 {
(getSizeByClassId(I) >= (PageSize / 32));
Region->RandState = getRandomU32(&Seed);
}
- ReleaseToOsIntervalMs = ReleaseToOsInterval;
+ setReleaseToOsIntervalMs(ReleaseToOsInterval);
if (SupportsMemoryTagging)
UseMemoryTagging = systemSupportsMemoryTagging();
@@ -186,6 +190,15 @@ class SizeClassAllocator64 {
getStats(Str, I, 0);
}
+ void setReleaseToOsIntervalMs(s32 Interval) {
+ if (Interval >= MaxReleaseToOsIntervalMs) {
+ Interval = MaxReleaseToOsIntervalMs;
+ } else if (Interval <= MinReleaseToOsIntervalMs) {
+ Interval = MinReleaseToOsIntervalMs;
+ }
+ atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+ }
+
uptr releaseToOS() {
uptr TotalReleasedBytes = 0;
for (uptr I = 0; I < NumClasses; I++) {
@@ -241,7 +254,7 @@ class SizeClassAllocator64 {
uptr PrimaryBase;
RegionInfo *RegionInfoArray;
MapPlatformData Data;
- s32 ReleaseToOsIntervalMs;
+ atomic_s32 ReleaseToOsIntervalMs;
bool UseMemoryTagging;
RegionInfo *getRegionInfo(uptr ClassId) const {
@@ -375,6 +388,10 @@ class SizeClassAllocator64 {
getRegionBaseByClassId(ClassId));
}
+ s32 getReleaseToOsIntervalMs() {
+ return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
+ }
+
NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
bool Force = false) {
const uptr BlockSize = getSizeByClassId(ClassId);
@@ -394,7 +411,7 @@ class SizeClassAllocator64 {
}
if (!Force) {
- const s32 IntervalMs = ReleaseToOsIntervalMs;
+ const s32 IntervalMs = getReleaseToOsIntervalMs();
if (IntervalMs < 0)
return 0;
if (Region->ReleaseInfo.LastReleaseAtNs +
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
index deba7a930d98..8ae8108b2eaa 100644
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -62,7 +62,9 @@ class MapAllocatorNoCache {
void releaseToOS() {}
};
-template <uptr MaxEntriesCount = 32U, uptr MaxEntrySize = 1UL << 19>
+template <uptr MaxEntriesCount = 32U, uptr MaxEntrySize = 1UL << 19,
+ s32 MinReleaseToOsIntervalMs = INT32_MIN,
+ s32 MaxReleaseToOsIntervalMs = INT32_MAX>
class MapAllocatorCache {
public:
// Fuchsia doesn't allow releasing Secondary blocks yet. Note that 0 length
@@ -71,7 +73,7 @@ class MapAllocatorCache {
static_assert(!SCUDO_FUCHSIA || MaxEntriesCount == 0U, "");
void initLinkerInitialized(s32 ReleaseToOsInterval) {
- ReleaseToOsIntervalMs = ReleaseToOsInterval;
+ setReleaseToOsIntervalMs(ReleaseToOsInterval);
}
void init(s32 ReleaseToOsInterval) {
memset(this, 0, sizeof(*this));
@@ -105,11 +107,11 @@ class MapAllocatorCache {
}
}
}
+ s32 Interval;
if (EmptyCache)
empty();
- else if (ReleaseToOsIntervalMs >= 0)
- releaseOlderThan(Time -
- static_cast<u64>(ReleaseToOsIntervalMs) * 1000000);
+ else if ((Interval = getReleaseToOsIntervalMs()) >= 0)
+ releaseOlderThan(Time - static_cast<u64>(Interval) * 1000000);
return EntryCached;
}
@@ -142,6 +144,15 @@ class MapAllocatorCache {
return MaxEntriesCount != 0U && Size <= MaxEntrySize;
}
+ void setReleaseToOsIntervalMs(s32 Interval) {
+ if (Interval >= MaxReleaseToOsIntervalMs) {
+ Interval = MaxReleaseToOsIntervalMs;
+ } else if (Interval <= MinReleaseToOsIntervalMs) {
+ Interval = MinReleaseToOsIntervalMs;
+ }
+ atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+ }
+
void releaseToOS() { releaseOlderThan(UINT64_MAX); }
void disable() { Mutex.lock(); }
@@ -189,6 +200,10 @@ class MapAllocatorCache {
}
}
+ s32 getReleaseToOsIntervalMs() {
+ return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
+ }
+
struct CachedBlock {
uptr Block;
uptr BlockEnd;
@@ -203,7 +218,7 @@ class MapAllocatorCache {
u32 EntriesCount;
uptr LargestSize;
u32 IsFullEvents;
- s32 ReleaseToOsIntervalMs;
+ atomic_s32 ReleaseToOsIntervalMs;
};
template <class CacheT> class MapAllocator {
@@ -251,6 +266,10 @@ template <class CacheT> class MapAllocator {
static uptr canCache(uptr Size) { return CacheT::canCache(Size); }
+ void setReleaseToOsIntervalMs(s32 Interval) {
+ Cache.setReleaseToOsIntervalMs(Interval);
+ }
+
void releaseToOS() { Cache.releaseToOS(); }
private:
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
index 91f615dcb8f8..314a835074e6 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -157,7 +157,18 @@ void SCUDO_PREFIX(malloc_postinit)() {
INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) {
if (param == M_DECAY_TIME) {
- // TODO(kostyak): set release_to_os_interval_ms accordingly.
+ if (SCUDO_ANDROID) {
+ if (value == 0) {
+ // Will set the release values to their minimum values.
+ value = INT32_MIN;
+ } else {
+ // Will set the release values to their maximum values.
+ value = INT32_MAX;
+ }
+ }
+
+ SCUDO_ALLOCATOR.setOption(scudo::Option::ReleaseInterval,
+ static_cast<scudo::sptr>(value));
return 1;
} else if (param == M_PURGE) {
SCUDO_ALLOCATOR.releaseToOS();
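
For reference, a standalone sketch of the clamping behavior added by
setReleaseToOsIntervalMs(), evaluated with the AndroidConfig parameters from
allocator_config.h above (primary Min = Max = 1000 ms, secondary cache Min = 0,
Max = 1000 ms). clampInterval() is illustrative only and not a function in the
patch.

  #include <cassert>
  #include <cstdint>

  // Re-statement of the clamp performed by setReleaseToOsIntervalMs().
  static int32_t clampInterval(int32_t Interval, int32_t Min, int32_t Max) {
    if (Interval >= Max)
      return Max;
    if (Interval <= Min)
      return Min;
    return Interval;
  }

  int main() {
    // AndroidConfig primary: Min = Max = 1000 -> interval pinned to 1 second.
    assert(clampInterval(INT32_MIN, 1000, 1000) == 1000);
    assert(clampInterval(INT32_MAX, 1000, 1000) == 1000);
    // AndroidConfig secondary cache: Min = 0, Max = 1000.
    assert(clampInterval(INT32_MIN, 0, 1000) == 0);     // mallopt(M_DECAY_TIME, 0)
    assert(clampInterval(INT32_MAX, 0, 1000) == 1000);  // mallopt(M_DECAY_TIME, non-zero)
    return 0;
  }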