[llvm-branch-commits] [compiler-rt] faac1c0 - scudo: Move the management of the UseMemoryTagging bit out of the Primary. NFCI.
Peter Collingbourne via llvm-branch-commits
llvm-branch-commits at lists.llvm.org
Tue Dec 22 16:57:33 PST 2020
Author: Peter Collingbourne
Date: 2020-12-22T16:52:54-08:00
New Revision: faac1c02c802048efa17f8f6cda8f39b5584f0c6
URL: https://github.com/llvm/llvm-project/commit/faac1c02c802048efa17f8f6cda8f39b5584f0c6
DIFF: https://github.com/llvm/llvm-project/commit/faac1c02c802048efa17f8f6cda8f39b5584f0c6.diff
LOG: scudo: Move the management of the UseMemoryTagging bit out of the Primary. NFCI.
The primary and secondary allocators will need to share this bit,
so move the management of the bit to the combined allocator and
make useMemoryTagging() a free function.
Differential Revision: https://reviews.llvm.org/D93730
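In outline (a condensed view of the diff below, not new code): useMemoryTagging() stops being a Primary member and becomes a config-templated free function in options.h, and the combined allocator now owns setting the UseMemoryTagging bit during init. Roughly:

    // options.h: a free function keyed on the allocator config, so both
    // the primary and secondary allocators can query the same bit.
    template <typename Config> bool useMemoryTagging(Options Options) {
      return allocatorSupportsMemoryTagging<Config>() &&
             Options.get(OptionBit::UseMemoryTagging);
    }

    // combined.h: the combined allocator, not the primary, decides
    // whether to set the bit at initialization time.
    if (allocatorSupportsMemoryTagging<Params>() &&
        systemSupportsMemoryTagging())
      Primary.Options.set(OptionBit::UseMemoryTagging);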
Added:
Modified:
compiler-rt/lib/scudo/standalone/combined.h
compiler-rt/lib/scudo/standalone/options.h
compiler-rt/lib/scudo/standalone/primary32.h
compiler-rt/lib/scudo/standalone/primary64.h
compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
Removed:
################################################################################
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index fae71ba1b84f..911c49dcc3fd 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -100,7 +100,7 @@ class Allocator {
// Reset tag to 0 as this chunk may have been previously used for a tagged
// user allocation.
- if (UNLIKELY(Allocator.useMemoryTagging()))
+ if (UNLIKELY(useMemoryTagging<Params>(Allocator.Primary.Options.load())))
storeTags(reinterpret_cast<uptr>(Ptr),
reinterpret_cast<uptr>(Ptr) + sizeof(QuarantineBatch));
@@ -161,6 +161,9 @@ class Allocator {
Primary.Options.set(OptionBit::DeallocTypeMismatch);
if (getFlags()->delete_size_mismatch)
Primary.Options.set(OptionBit::DeleteSizeMismatch);
+ if (allocatorSupportsMemoryTagging<Params>() &&
+ systemSupportsMemoryTagging())
+ Primary.Options.set(OptionBit::UseMemoryTagging);
Primary.Options.set(OptionBit::UseOddEvenTags);
QuarantineMaxChunkSize =
@@ -240,7 +243,7 @@ class Allocator {
}
ALWAYS_INLINE void *untagPointerMaybe(void *Ptr) {
- if (Primary.SupportsMemoryTagging)
+ if (allocatorSupportsMemoryTagging<Params>())
return reinterpret_cast<void *>(
untagPointer(reinterpret_cast<uptr>(Ptr)));
return Ptr;
@@ -367,7 +370,7 @@ class Allocator {
//
// When memory tagging is enabled, zeroing the contents is done as part of
// setting the tag.
- if (UNLIKELY(useMemoryTagging(Options))) {
+ if (UNLIKELY(useMemoryTagging<Params>(Options))) {
uptr PrevUserPtr;
Chunk::UnpackedHeader Header;
const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
@@ -594,7 +597,7 @@ class Allocator {
: BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
Chunk::SizeOrUnusedBytesMask;
Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
- if (UNLIKELY(ClassId && useMemoryTagging(Options))) {
+ if (UNLIKELY(ClassId && useMemoryTagging<Params>(Options))) {
resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
BlockEnd);
@@ -692,7 +695,7 @@ class Allocator {
if (getChunkFromBlock(Block, &Chunk, &Header) &&
Header.State == Chunk::State::Allocated) {
uptr TaggedChunk = Chunk;
- if (useMemoryTagging(Primary.Options.load()))
+ if (useMemoryTagging<Params>(Primary.Options.load()))
TaggedChunk = loadTag(Chunk);
Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
Arg);
@@ -783,15 +786,14 @@ class Allocator {
Header.State == Chunk::State::Allocated;
}
- bool useMemoryTagging() const {
- return useMemoryTagging(Primary.Options.load());
+ bool useMemoryTaggingTestOnly() const {
+ return useMemoryTagging<Params>(Primary.Options.load());
}
- static bool useMemoryTagging(Options Options) {
- return PrimaryT::useMemoryTagging(Options);
+ void disableMemoryTagging() {
+ if (allocatorSupportsMemoryTagging<Params>())
+ Primary.Options.clear(OptionBit::UseMemoryTagging);
}
- void disableMemoryTagging() { Primary.disableMemoryTagging(); }
-
void setTrackAllocationStacks(bool Track) {
initThreadMaybe();
if (Track)
@@ -823,7 +825,7 @@ class Allocator {
const char *MemoryTags, uintptr_t MemoryAddr,
size_t MemorySize) {
*ErrorInfo = {};
- if (!PrimaryT::SupportsMemoryTagging ||
+ if (!allocatorSupportsMemoryTagging<Params>() ||
MemoryAddr + MemorySize < MemoryAddr)
return;
@@ -942,7 +944,7 @@ class Allocator {
static_assert(MinAlignment >= sizeof(Chunk::PackedHeader),
"Minimal alignment must at least cover a chunk header.");
- static_assert(!PrimaryT::SupportsMemoryTagging ||
+ static_assert(!allocatorSupportsMemoryTagging<Params>() ||
MinAlignment >= archMemoryTagGranuleSize(),
"");
@@ -1037,7 +1039,7 @@ class Allocator {
void quarantineOrDeallocateChunk(Options Options, void *Ptr,
Chunk::UnpackedHeader *Header, uptr Size) {
Chunk::UnpackedHeader NewHeader = *Header;
- if (UNLIKELY(NewHeader.ClassId && useMemoryTagging(Options))) {
+ if (UNLIKELY(NewHeader.ClassId && useMemoryTagging<Params>(Options))) {
u8 PrevTag = extractTag(loadTag(reinterpret_cast<uptr>(Ptr)));
if (!TSDRegistry.getDisableMemInit()) {
uptr TaggedBegin, TaggedEnd;
diff --git a/compiler-rt/lib/scudo/standalone/options.h b/compiler-rt/lib/scudo/standalone/options.h
index 2cffc4d75c38..91301bf5ec9c 100644
--- a/compiler-rt/lib/scudo/standalone/options.h
+++ b/compiler-rt/lib/scudo/standalone/options.h
@@ -11,6 +11,7 @@
#include "atomic_helpers.h"
#include "common.h"
+#include "memtag.h"
namespace scudo {
@@ -36,6 +37,11 @@ struct Options {
}
};
+template <typename Config> bool useMemoryTagging(Options Options) {
+ return allocatorSupportsMemoryTagging<Config>() &&
+ Options.get(OptionBit::UseMemoryTagging);
+}
+
struct AtomicOptions {
atomic_u32 Val;
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
index c744670b4392..a88a2a67e951 100644
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -50,7 +50,6 @@ template <typename Config> class SizeClassAllocator32 {
typedef SizeClassAllocator32<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
typedef typename CacheT::TransferBatch TransferBatch;
- static const bool SupportsMemoryTagging = false;
static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
@@ -216,9 +215,6 @@ template <typename Config> class SizeClassAllocator32 {
return TotalReleasedBytes;
}
- static bool useMemoryTagging(UNUSED Options Options) { return false; }
- void disableMemoryTagging() {}
-
const char *getRegionInfoArrayAddress() const { return nullptr; }
static uptr getRegionInfoArraySize() { return 0; }
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
index df1310aa8e95..2724a2529f75 100644
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -46,8 +46,6 @@ template <typename Config> class SizeClassAllocator64 {
typedef SizeClassAllocator64<Config> ThisT;
typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
typedef typename CacheT::TransferBatch TransferBatch;
- static const bool SupportsMemoryTagging =
- allocatorSupportsMemoryTagging<Config>();
static uptr getSizeByClassId(uptr ClassId) {
return (ClassId == SizeClassMap::BatchClassId)
@@ -76,9 +74,6 @@ template <typename Config> class SizeClassAllocator64 {
Region->ReleaseInfo.LastReleaseAtNs = Time;
}
setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
-
- if (SupportsMemoryTagging && systemSupportsMemoryTagging())
- Options.set(OptionBit::UseMemoryTagging);
}
void init(s32 ReleaseToOsInterval) {
memset(this, 0, sizeof(*this));
@@ -193,11 +188,6 @@ template <typename Config> class SizeClassAllocator64 {
return TotalReleasedBytes;
}
- static bool useMemoryTagging(Options Options) {
- return SupportsMemoryTagging && Options.get(OptionBit::UseMemoryTagging);
- }
- void disableMemoryTagging() { Options.clear(OptionBit::UseMemoryTagging); }
-
const char *getRegionInfoArrayAddress() const {
return reinterpret_cast<const char *>(RegionInfoArray);
}
@@ -335,7 +325,7 @@ template <typename Config> class SizeClassAllocator64 {
if (!map(reinterpret_cast<void *>(RegionBeg + MappedUser), UserMapSize,
"scudo:primary",
MAP_ALLOWNOMEM | MAP_RESIZABLE |
- (useMemoryTagging(Options.load()) ? MAP_MEMTAG : 0),
+ (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG : 0),
&Region->Data))
return nullptr;
Region->MappedUser += UserMapSize;
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
index 53874933e7ac..7bb6725d3a52 100644
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -47,7 +47,7 @@ bool isPrimaryAllocation(scudo::uptr Size, scudo::uptr Alignment) {
template <class AllocatorT>
bool isTaggedAllocation(AllocatorT *Allocator, scudo::uptr Size,
scudo::uptr Alignment) {
- return Allocator->useMemoryTagging() &&
+ return Allocator->useMemoryTaggingTestOnly() &&
scudo::systemDetectsMemoryTagFaultsTestOnly() &&
isPrimaryAllocation<AllocatorT>(Size, Alignment);
}
@@ -162,7 +162,7 @@ template <class Config> static void testAllocator() {
for (scudo::uptr I = 0; I < Size; I++) {
unsigned char V = (reinterpret_cast<unsigned char *>(P))[I];
if (isPrimaryAllocation<AllocatorT>(Size, 1U << MinAlignLog) &&
- !Allocator->useMemoryTagging())
+ !Allocator->useMemoryTaggingTestOnly())
ASSERT_EQ(V, scudo::PatternFillByte);
else
ASSERT_TRUE(V == scudo::PatternFillByte || V == 0);
@@ -248,7 +248,7 @@ template <class Config> static void testAllocator() {
Allocator->releaseToOS();
- if (Allocator->useMemoryTagging() &&
+ if (Allocator->useMemoryTaggingTestOnly() &&
scudo::systemDetectsMemoryTagFaultsTestOnly()) {
// Check that use-after-free is detected.
for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
@@ -493,7 +493,7 @@ TEST(ScudoCombinedTest, OddEven) {
using SizeClassMap = AllocatorT::PrimaryT::SizeClassMap;
auto Allocator = std::unique_ptr<AllocatorT>(new AllocatorT());
- if (!Allocator->useMemoryTagging())
+ if (!Allocator->useMemoryTaggingTestOnly())
return;
auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {