[compiler-rt] [scudo] [MTE] resize stack depot for allocation ring buffer (PR #74515)
Florian Mayer via llvm-commits
llvm-commits at lists.llvm.org
Tue Dec 5 18:33:13 PST 2023
https://github.com/fmayer updated https://github.com/llvm/llvm-project/pull/74515
From dea8b03aaeac52e536640312bf1e107fef903f31 Mon Sep 17 00:00:00 2001
From: Florian Mayer <fmayer at google.com>
Date: Tue, 5 Dec 2023 15:57:53 -0800
Subject: [PATCH 1/2] [scudo] do not store size inside ring buffer
---
compiler-rt/lib/scudo/standalone/combined.h | 51 +++++++++++--------
.../standalone/fuzz/get_error_info_fuzzer.cpp | 9 ++--
.../scudo/standalone/wrappers_c_bionic.cpp | 5 +-
3 files changed, 35 insertions(+), 30 deletions(-)
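For orientation before the diff: the net effect of this first patch, sketched
informally (the before/after framing is illustrative only; the field names are
the ones used below):

    // Before: the mapping handle and element count live inside the mapped
    // region, so an out-of-process reader has to trust them.
    struct AllocationRingBuffer {
      MemMapT MemMap;
      atomic_uptr Pos;
      u32 Size; // Entry[Size] follows.
    };

    // After: only Pos stays in the mapped region. The element count is
    // derived from the buffer's byte size, and the mapping handle moves
    // into the Allocator as RawRingBufferMap / RingBufferElements.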
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 25ad11dbf7ee5..65ddc488370a7 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -14,6 +14,7 @@
#include "flags.h"
#include "flags_parser.h"
#include "local_cache.h"
+#include "mem_map.h"
#include "memtag.h"
#include "options.h"
#include "quarantine.h"
@@ -935,8 +936,7 @@ class Allocator {
uptr getRingBufferSize() {
initThreadMaybe();
- auto *RingBuffer = getRingBuffer();
- return RingBuffer ? ringBufferSizeInBytes(RingBuffer->Size) : 0;
+ return RingBufferElements ? ringBufferSizeInBytes(RingBufferElements) : 0;
}
static bool setRingBufferSizeForBuffer(char *Buffer, size_t Size) {
@@ -966,8 +966,9 @@ class Allocator {
static void getErrorInfo(struct scudo_error_info *ErrorInfo,
uintptr_t FaultAddr, const char *DepotPtr,
const char *RegionInfoPtr, const char *RingBufferPtr,
- const char *Memory, const char *MemoryTags,
- uintptr_t MemoryAddr, size_t MemorySize) {
+ size_t RingBufferSize, const char *Memory,
+ const char *MemoryTags, uintptr_t MemoryAddr,
+ size_t MemorySize) {
*ErrorInfo = {};
if (!allocatorSupportsMemoryTagging<Config>() ||
MemoryAddr + MemorySize < MemoryAddr)
@@ -986,7 +987,7 @@ class Allocator {
// Check the ring buffer. For primary allocations this will only find UAF;
// for secondary allocations we can find either UAF or OOB.
getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
- RingBufferPtr);
+ RingBufferPtr, RingBufferSize);
// Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
// Beyond that we are likely to hit false positives.
@@ -1051,15 +1052,15 @@ class Allocator {
atomic_u32 DeallocationTid;
};
- MemMapT MemMap;
atomic_uptr Pos;
- u32 Size;
// An array of Size (at least one) elements of type Entry immediately
// follows this struct.
};
// Pointer to memory mapped area starting with AllocationRingBuffer struct,
// and immediately followed by Size elements of type Entry.
char *RawRingBuffer = {};
+ u32 RingBufferElements = 0;
+ MemMapT RawRingBufferMap;
// The following might get optimized out by the compiler.
NOINLINE void performSanityChecks() {
@@ -1267,7 +1268,7 @@ class Allocator {
u32 DeallocationTid) {
uptr Pos = atomic_fetch_add(&getRingBuffer()->Pos, 1, memory_order_relaxed);
typename AllocationRingBuffer::Entry *Entry =
- getRingBufferEntry(RawRingBuffer, Pos % getRingBuffer()->Size);
+ getRingBufferEntry(RawRingBuffer, Pos % RingBufferElements);
// First invalidate our entry so that we don't attempt to interpret a
// partially written state in getSecondaryErrorInfo(). The fences below
@@ -1408,17 +1409,19 @@ class Allocator {
size_t &NextErrorReport,
uintptr_t FaultAddr,
const StackDepot *Depot,
- const char *RingBufferPtr) {
+ const char *RingBufferPtr,
+ size_t RingBufferSize) {
auto *RingBuffer =
reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
- if (!RingBuffer || RingBuffer->Size == 0)
+ size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
+ if (!RingBuffer || RingBufferElements == 0)
return;
uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
- for (uptr I = Pos - 1;
- I != Pos - 1 - RingBuffer->Size && NextErrorReport != NumErrorReports;
+ for (uptr I = Pos - 1; I != Pos - 1 - RingBufferElements &&
+ NextErrorReport != NumErrorReports;
--I) {
- auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBuffer->Size);
+ auto *Entry = getRingBufferEntry(RingBufferPtr, I % RingBufferElements);
uptr EntryPtr = atomic_load_relaxed(&Entry->Ptr);
if (!EntryPtr)
continue;
@@ -1508,9 +1511,8 @@ class Allocator {
getPageSizeCached()),
"scudo:ring_buffer");
RawRingBuffer = reinterpret_cast<char *>(MemMap.getBase());
- auto *RingBuffer = reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
- RingBuffer->MemMap = MemMap;
- RingBuffer->Size = AllocationRingBufferSize;
+ RawRingBufferMap = MemMap;
+ RingBufferElements = AllocationRingBufferSize;
static_assert(sizeof(AllocationRingBuffer) %
alignof(typename AllocationRingBuffer::Entry) ==
0,
@@ -1520,16 +1522,23 @@ class Allocator {
void unmapRingBuffer() {
auto *RingBuffer = getRingBuffer();
if (RingBuffer != nullptr) {
- MemMapT MemMap = RingBuffer->MemMap;
- MemMap.unmap(MemMap.getBase(), MemMap.getCapacity());
+ RawRingBufferMap.unmap(RawRingBufferMap.getBase(),
+ RawRingBufferMap.getCapacity());
}
RawRingBuffer = nullptr;
}
- static constexpr size_t ringBufferSizeInBytes(u32 AllocationRingBufferSize) {
+ static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
return sizeof(AllocationRingBuffer) +
- AllocationRingBufferSize *
- sizeof(typename AllocationRingBuffer::Entry);
+ RingBufferElements * sizeof(typename AllocationRingBuffer::Entry);
+ }
+
+ static constexpr size_t ringBufferElementsFromBytes(size_t Bytes) {
+ if (Bytes < sizeof(AllocationRingBuffer)) {
+ return 0;
+ }
+ return (Bytes - sizeof(AllocationRingBuffer)) /
+ sizeof(typename AllocationRingBuffer::Entry);
}
inline AllocationRingBuffer *getRingBuffer() {
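The two helpers above are intended to be inverses, which is what lets a byte
size replace the stored Size field. A minimal sketch of the invariant
(assuming the private helpers were accessible; Entry abbreviates
AllocationRingBuffer::Entry):

    // ringBufferSizeInBytes(N) == sizeof(AllocationRingBuffer)
    //                             + N * sizeof(Entry)
    // ringBufferElementsFromBytes(ringBufferSizeInBytes(N)) == N
    // Any byte size smaller than the header yields 0 elements, which
    // getRingBufferErrorInfo treats as "no ring buffer".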
diff --git a/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp b/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
index 74456450a4761..5b01ebe11c095 100644
--- a/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
+++ b/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
@@ -46,14 +46,11 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
}
std::string RingBufferBytes = FDP.ConsumeRemainingBytesAsString();
- // RingBuffer is too short.
- if (!AllocatorT::setRingBufferSizeForBuffer(RingBufferBytes.data(),
- RingBufferBytes.size()))
- return 0;
scudo_error_info ErrorInfo;
AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepot.data(),
- RegionInfo.data(), RingBufferBytes.data(), Memory,
- MemoryTags, MemoryAddr, MemorySize);
+ RegionInfo.data(), RingBufferBytes.data(),
+ RingBufferBytes.size(), Memory, MemoryTags,
+ MemoryAddr, MemorySize);
return 0;
}
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
index f203615ab3602..21694c3f17fe5 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -44,10 +44,9 @@ INTERFACE void __scudo_get_error_info(
const char *ring_buffer, size_t ring_buffer_size, const char *memory,
const char *memory_tags, uintptr_t memory_addr, size_t memory_size) {
(void)(stack_depot_size);
- (void)(ring_buffer_size);
Allocator.getErrorInfo(error_info, fault_addr, stack_depot, region_info,
- ring_buffer, memory, memory_tags, memory_addr,
- memory_size);
+ ring_buffer, ring_buffer_size, memory, memory_tags,
+ memory_addr, memory_size);
}
INTERFACE const char *__scudo_get_stack_depot_addr() {
From c7ebe3add3e1137a9bd8b9c39c7ac728c570d996 Mon Sep 17 00:00:00 2001
From: Florian Mayer <fmayer at google.com>
Date: Fri, 1 Dec 2023 16:46:29 -0800
Subject: [PATCH 2/2] [scudo] resize stack depot for allocation ring buffer
---
compiler-rt/lib/scudo/standalone/combined.h | 101 +++++++++++----
.../standalone/fuzz/get_error_info_fuzzer.cpp | 14 +--
compiler-rt/lib/scudo/standalone/platform.h | 10 --
.../lib/scudo/standalone/stack_depot.h | 115 +++++++++++++-----
.../scudo/standalone/wrappers_c_bionic.cpp | 9 +-
5 files changed, 170 insertions(+), 79 deletions(-)
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
index 65ddc488370a7..cda9704473a2b 100644
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -290,7 +290,9 @@ class Allocator {
uptr Size =
android_unsafe_frame_pointer_chase(Stack, MaxTraceSize + DiscardFrames);
Size = Min<uptr>(Size, MaxTraceSize + DiscardFrames);
- return Depot.insert(Stack + Min<uptr>(DiscardFrames, Size), Stack + Size);
+ return reinterpret_cast<StackDepot *>(RawStackDepot)
+ ->insert(RawStackDepot, Stack + Min<uptr>(DiscardFrames, Size),
+ Stack + Size);
#else
return 0;
#endif
@@ -917,8 +919,14 @@ class Allocator {
Primary.Options.clear(OptionBit::AddLargeAllocationSlack);
}
- const char *getStackDepotAddress() const {
- return reinterpret_cast<const char *>(&Depot);
+ const char *getStackDepotAddress() {
+ initThreadMaybe();
+ return RawStackDepot;
+ }
+
+ uptr getStackDepotSize() {
+ initThreadMaybe();
+ return StackDepotSize;
}
const char *getRegionInfoArrayAddress() const {
@@ -954,45 +962,54 @@ class Allocator {
static const uptr MaxTraceSize = 64;
- static void collectTraceMaybe(const StackDepot *Depot,
+ static void collectTraceMaybe(const char *RawStackDepot,
uintptr_t (&Trace)[MaxTraceSize], u32 Hash) {
+ auto *Depot = reinterpret_cast<const StackDepot *>(RawStackDepot);
uptr RingPos, Size;
- if (!Depot->find(Hash, &RingPos, &Size))
+ if (!Depot->find(RawStackDepot, Hash, &RingPos, &Size))
return;
for (unsigned I = 0; I != Size && I != MaxTraceSize; ++I)
- Trace[I] = static_cast<uintptr_t>((*Depot)[RingPos + I]);
+ Trace[I] = static_cast<uintptr_t>(Depot->at(RawStackDepot, RingPos + I));
}
static void getErrorInfo(struct scudo_error_info *ErrorInfo,
uintptr_t FaultAddr, const char *DepotPtr,
- const char *RegionInfoPtr, const char *RingBufferPtr,
- size_t RingBufferSize, const char *Memory,
- const char *MemoryTags, uintptr_t MemoryAddr,
- size_t MemorySize) {
+ size_t DepotSize, const char *RegionInfoPtr,
+ const char *RingBufferPtr, size_t RingBufferSize,
+ const char *Memory, const char *MemoryTags,
+ uintptr_t MemoryAddr, size_t MemorySize) {
*ErrorInfo = {};
if (!allocatorSupportsMemoryTagging<Config>() ||
MemoryAddr + MemorySize < MemoryAddr)
return;
- auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
+ if (DepotPtr && DepotSize < sizeof(StackDepot)) {
+ return;
+ }
+ if (DepotPtr &&
+ !reinterpret_cast<const StackDepot *>(DepotPtr)->isValid(DepotSize)) {
+ // Corrupted stack depot.
+ return;
+ }
+
size_t NextErrorReport = 0;
// Check for OOB in the current block and the two surrounding blocks. Beyond
// that, UAF is more likely.
if (extractTag(FaultAddr) != 0)
- getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
+ getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, DepotPtr,
RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
MemorySize, 0, 2);
// Check the ring buffer. For primary allocations this will only find UAF;
// for secondary allocations we can find either UAF or OOB.
- getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
+ getRingBufferErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, DepotPtr,
RingBufferPtr, RingBufferSize);
// Check for OOB in the 28 blocks surrounding the 3 we checked earlier.
// Beyond that we are likely to hit false positives.
if (extractTag(FaultAddr) != 0)
- getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, Depot,
+ getInlineErrorInfo(ErrorInfo, NextErrorReport, FaultAddr, DepotPtr,
RegionInfoPtr, Memory, MemoryTags, MemoryAddr,
MemorySize, 2, 16);
}
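The order of the two depot checks above matters; as a sketch, mirroring the
code:

    // 1. DepotSize >= sizeof(StackDepot): the header itself is readable.
    // 2. isValid(DepotSize): the masks stored in that header agree with
    //    the byte size the caller supplied.
    // Only then are the Ring()/Tab() offsets derived from the header safe
    // to dereference on an untrusted, copied-out buffer.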
@@ -1040,7 +1057,9 @@ class Allocator {
uptr GuardedAllocSlotSize = 0;
#endif // GWP_ASAN_HOOKS
- StackDepot Depot;
+ char *RawStackDepot = nullptr;
+ uptr StackDepotSize = 0;
+ MemMapT RawStackDepotMap;
struct AllocationRingBuffer {
struct Entry {
@@ -1256,7 +1275,8 @@ class Allocator {
}
void storePrimaryAllocationStackMaybe(const Options &Options, void *Ptr) {
- if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+ if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)) ||
+ !RawRingBuffer)
return;
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
@@ -1289,7 +1309,8 @@ class Allocator {
void storeSecondaryAllocationStackMaybe(const Options &Options, void *Ptr,
uptr Size) {
- if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+ if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)) ||
+ !RawRingBuffer)
return;
u32 Trace = collectStackTrace();
@@ -1304,7 +1325,8 @@ class Allocator {
void storeDeallocationStackMaybe(const Options &Options, void *Ptr,
u8 PrevTag, uptr Size) {
- if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
+ if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)) ||
+ !RawRingBuffer)
return;
auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
@@ -1325,7 +1347,7 @@ class Allocator {
static void getInlineErrorInfo(struct scudo_error_info *ErrorInfo,
size_t &NextErrorReport, uintptr_t FaultAddr,
- const StackDepot *Depot,
+ const char *RawStackDepot,
const char *RegionInfoPtr, const char *Memory,
const char *MemoryTags, uintptr_t MemoryAddr,
size_t MemorySize, size_t MinDistance,
@@ -1390,8 +1412,10 @@ class Allocator {
UntaggedFaultAddr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
R->allocation_address = ChunkAddr;
R->allocation_size = Header.SizeOrUnusedBytes;
- collectTraceMaybe(Depot, R->allocation_trace,
- Data[MemTagAllocationTraceIndex]);
+ if (RawStackDepot) {
+ collectTraceMaybe(RawStackDepot, R->allocation_trace,
+ Data[MemTagAllocationTraceIndex]);
+ }
R->allocation_tid = Data[MemTagAllocationTidIndex];
return NextErrorReport == NumErrorReports;
};
@@ -1408,13 +1432,13 @@ class Allocator {
static void getRingBufferErrorInfo(struct scudo_error_info *ErrorInfo,
size_t &NextErrorReport,
uintptr_t FaultAddr,
- const StackDepot *Depot,
+ const char *RawStackDepot,
const char *RingBufferPtr,
size_t RingBufferSize) {
auto *RingBuffer =
reinterpret_cast<const AllocationRingBuffer *>(RingBufferPtr);
size_t RingBufferElements = ringBufferElementsFromBytes(RingBufferSize);
- if (!RingBuffer || RingBufferElements == 0)
+ if (!RingBuffer || RingBufferElements == 0 || !RawStackDepot)
return;
uptr Pos = atomic_load_relaxed(&RingBuffer->Pos);
@@ -1473,9 +1497,10 @@ class Allocator {
R->allocation_address = UntaggedEntryPtr;
R->allocation_size = EntrySize;
- collectTraceMaybe(Depot, R->allocation_trace, AllocationTrace);
+ collectTraceMaybe(RawStackDepot, R->allocation_trace, AllocationTrace);
R->allocation_tid = AllocationTid;
- collectTraceMaybe(Depot, R->deallocation_trace, DeallocationTrace);
+ collectTraceMaybe(RawStackDepot, R->deallocation_trace,
+ DeallocationTrace);
R->deallocation_tid = DeallocationTid;
}
}
@@ -1504,6 +1529,28 @@ class Allocator {
return;
u32 AllocationRingBufferSize =
static_cast<u32>(getFlags()->allocation_ring_buffer_size);
+ // We store alloc and free stacks for each entry.
+ constexpr auto kStacksPerRingBufferEntry = 2;
+ u32 TabSize = static_cast<u32>(roundUpPowerOfTwo(kStacksPerRingBufferEntry *
+ AllocationRingBufferSize));
+ constexpr auto kFramesPerStack = 8;
+ static_assert(isPowerOfTwo(kFramesPerStack));
+ u32 RingSize = static_cast<u32>(TabSize * kFramesPerStack);
+ DCHECK(isPowerOfTwo(RingSize));
+ static_assert(sizeof(StackDepot) % alignof(atomic_u64) == 0);
+
+ StackDepotSize = sizeof(StackDepot) + sizeof(atomic_u64) * RingSize +
+ sizeof(atomic_u32) * TabSize;
+ MemMapT DepotMap;
+ DepotMap.map(
+ /*Addr=*/0U, roundUp(StackDepotSize, getPageSizeCached()),
+ "scudo:stack_depot");
+ RawStackDepot = reinterpret_cast<char *>(DepotMap.getBase());
+ auto *Depot = reinterpret_cast<StackDepot *>(DepotMap.getBase());
+ Depot->init(RingSize, TabSize);
+ DCHECK(Depot->isValid(StackDepotSize));
+ RawStackDepotMap = DepotMap;
+
MemMapT MemMap;
MemMap.map(
/*Addr=*/0U,
@@ -1526,6 +1573,10 @@ class Allocator {
RawRingBufferMap.getCapacity());
}
RawRingBuffer = nullptr;
+ if (RawStackDepot) {
+ RawStackDepotMap.unmap(RawStackDepotMap.getBase(),
+ RawStackDepotMap.getCapacity());
+ }
}
static constexpr size_t ringBufferSizeInBytes(u32 RingBufferElements) {
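A worked example of the sizing arithmetic above, for an illustrative
allocation_ring_buffer_size of 32768 (the value is an assumption, not taken
from this patch):

    // TabSize  = roundUpPowerOfTwo(2 * 32768)  = 65536  atomic_u32 slots
    // RingSize = TabSize * kFramesPerStack (8) = 524288 atomic_u64 slots
    // StackDepotSize = sizeof(StackDepot)
    //                + 8 * 524288   // Ring: 4 MiB
    //                + 4 * 65536    // Tab: 256 KiB
    // i.e. about 4.25 MiB, rounded up to a page granule by DepotMap.map().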
diff --git a/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp b/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
index 5b01ebe11c095..2cef1c44fadc9 100644
--- a/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
+++ b/compiler-rt/lib/scudo/standalone/fuzz/get_error_info_fuzzer.cpp
@@ -9,6 +9,7 @@
#define SCUDO_FUZZ
#include "allocator_config.h"
#include "combined.h"
+#include "common.h"
#include <fuzzer/FuzzedDataProvider.h>
@@ -31,11 +32,6 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
std::string StackDepotBytes =
FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
- std::vector<char> StackDepot(sizeof(scudo::StackDepot), 0);
- for (size_t i = 0; i < StackDepotBytes.length() && i < StackDepot.size();
- ++i) {
- StackDepot[i] = StackDepotBytes[i];
- }
std::string RegionInfoBytes =
FDP.ConsumeRandomLengthString(FDP.remaining_bytes());
@@ -48,9 +44,9 @@ extern "C" int LLVMFuzzerTestOneInput(uint8_t *Data, size_t Size) {
std::string RingBufferBytes = FDP.ConsumeRemainingBytesAsString();
scudo_error_info ErrorInfo;
- AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepot.data(),
- RegionInfo.data(), RingBufferBytes.data(),
- RingBufferBytes.size(), Memory, MemoryTags,
- MemoryAddr, MemorySize);
+ AllocatorT::getErrorInfo(&ErrorInfo, FaultAddr, StackDepotBytes.data(),
+ StackDepotBytes.size(), RegionInfo.data(),
+ RingBufferBytes.data(), RingBufferBytes.size(),
+ Memory, MemoryTags, MemoryAddr, MemorySize);
return 0;
}
diff --git a/compiler-rt/lib/scudo/standalone/platform.h b/compiler-rt/lib/scudo/standalone/platform.h
index b71a86be7669f..5af1275e32d2b 100644
--- a/compiler-rt/lib/scudo/standalone/platform.h
+++ b/compiler-rt/lib/scudo/standalone/platform.h
@@ -63,16 +63,6 @@
#define SCUDO_CAN_USE_MTE (SCUDO_LINUX || SCUDO_TRUSTY)
#endif
-// Use smaller table sizes for fuzzing in order to reduce input size.
-// Trusty just has less available memory.
-#ifndef SCUDO_SMALL_STACK_DEPOT
-#if defined(SCUDO_FUZZ) || SCUDO_TRUSTY
-#define SCUDO_SMALL_STACK_DEPOT 1
-#else
-#define SCUDO_SMALL_STACK_DEPOT 0
-#endif
-#endif
-
#ifndef SCUDO_ENABLE_HOOKS
#define SCUDO_ENABLE_HOOKS 0
#endif
diff --git a/compiler-rt/lib/scudo/standalone/stack_depot.h b/compiler-rt/lib/scudo/standalone/stack_depot.h
index 12c35eb2a4f33..05c2801236f76 100644
--- a/compiler-rt/lib/scudo/standalone/stack_depot.h
+++ b/compiler-rt/lib/scudo/standalone/stack_depot.h
@@ -10,6 +10,8 @@
#define SCUDO_STACK_DEPOT_H_
#include "atomic_helpers.h"
+#include "common.h"
+#include "mem_map.h"
#include "mutex.h"
namespace scudo {
@@ -38,7 +40,7 @@ class MurMur2HashBuilder {
}
};
-class StackDepot {
+class alignas(16) StackDepot {
HybridMutex RingEndMu;
u32 RingEnd = 0;
@@ -62,47 +64,99 @@ class StackDepot {
// This is achieved by re-checking the hash of the stack trace before
// returning the trace.
-#if SCUDO_SMALL_STACK_DEPOT
- static const uptr TabBits = 4;
-#else
- static const uptr TabBits = 16;
-#endif
- static const uptr TabSize = 1 << TabBits;
- static const uptr TabMask = TabSize - 1;
- atomic_u32 Tab[TabSize] = {};
-
-#if SCUDO_SMALL_STACK_DEPOT
- static const uptr RingBits = 4;
-#else
- static const uptr RingBits = 19;
-#endif
- static const uptr RingSize = 1 << RingBits;
- static const uptr RingMask = RingSize - 1;
- atomic_u64 Ring[RingSize] = {};
+ uptr RingSize = 0;
+ uptr RingMask = 0;
+ uptr TabMask = 0;
+
+ atomic_u64 *Ring(char *RawStackDepot) {
+ return reinterpret_cast<atomic_u64 *>(RawStackDepot + sizeof(StackDepot));
+ }
+
+ atomic_u32 *Tab(char *RawStackDepot) {
+ return reinterpret_cast<atomic_u32 *>(RawStackDepot + sizeof(StackDepot) +
+ sizeof(atomic_u64) * RingSize);
+ }
+
+ const atomic_u64 *Ring(const char *RawStackDepot) const {
+ return reinterpret_cast<const atomic_u64 *>(RawStackDepot +
+ sizeof(StackDepot));
+ }
+
+ const atomic_u32 *Tab(const char *RawStackDepot) const {
+ return reinterpret_cast<const atomic_u32 *>(
+ RawStackDepot + sizeof(StackDepot) + sizeof(atomic_u64) * RingSize);
+ }
public:
+ void init(uptr RingSz, uptr TabSz) {
+ DCHECK(isPowerOfTwo(RingSz));
+ DCHECK(isPowerOfTwo(TabSz));
+ RingSize = RingSz;
+ RingMask = RingSz - 1;
+ TabMask = TabSz - 1;
+ }
+
+ bool isValid(uptr BufSize) const {
+ if (RingSize > UINTPTR_MAX / sizeof(atomic_u64)) {
+ return false;
+ }
+ if (RingSize == 0 || !isPowerOfTwo(RingSize)) {
+ return false;
+ }
+ uptr RingBytes = sizeof(atomic_u64) * RingSize;
+ if (RingMask + 1 != RingSize) {
+ return false;
+ }
+
+ if (TabMask == 0) {
+ return false;
+ }
+ // Reject a TabMask whose implied table size would overflow TabBytes.
+ if (TabMask > UINTPTR_MAX / sizeof(atomic_u32) - 1) {
+ return false;
+ }
+ uptr TabSize = TabMask + 1;
+ if (!isPowerOfTwo(TabSize)) {
+ return false;
+ }
+ uptr TabBytes = sizeof(atomic_u32) * TabSize;
+
+ // Subtract and detect underflow.
+ if (BufSize < sizeof(StackDepot)) {
+ return false;
+ }
+ BufSize -= sizeof(StackDepot);
+ if (BufSize < TabBytes) {
+ return false;
+ }
+ BufSize -= TabBytes;
+ if (BufSize < RingBytes) {
+ return false;
+ }
+ BufSize -= RingBytes;
+ return BufSize == 0;
+ }
+
// Insert hash of the stack trace [Begin, End) into the stack depot, and
// return the hash.
- u32 insert(uptr *Begin, uptr *End) {
+ u32 insert(char *RawStackDepot, uptr *Begin, uptr *End) {
MurMur2HashBuilder B;
for (uptr *I = Begin; I != End; ++I)
B.add(u32(*I) >> 2);
u32 Hash = B.get();
u32 Pos = Hash & TabMask;
- u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
- u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
+ u32 RingPos = atomic_load_relaxed(&Tab(RawStackDepot)[Pos]);
+ u64 Entry = atomic_load_relaxed(&Ring(RawStackDepot)[RingPos]);
u64 Id = (u64(End - Begin) << 33) | (u64(Hash) << 1) | 1;
if (Entry == Id)
return Hash;
ScopedLock Lock(RingEndMu);
RingPos = RingEnd;
- atomic_store_relaxed(&Tab[Pos], RingPos);
- atomic_store_relaxed(&Ring[RingPos], Id);
+ atomic_store_relaxed(&Tab(RawStackDepot)[Pos], RingPos);
+ atomic_store_relaxed(&Ring(RawStackDepot)[RingPos], Id);
for (uptr *I = Begin; I != End; ++I) {
RingPos = (RingPos + 1) & RingMask;
- atomic_store_relaxed(&Ring[RingPos], *I);
+ atomic_store_relaxed(&Ring(RawStackDepot)[RingPos], *I);
}
RingEnd = (RingPos + 1) & RingMask;
return Hash;
@@ -111,12 +165,13 @@ class StackDepot {
// Look up a stack trace by hash. Returns true if successful. The trace may be
// accessed via at() passing indexes between *RingPosPtr and
// *RingPosPtr + *SizePtr.
- bool find(u32 Hash, uptr *RingPosPtr, uptr *SizePtr) const {
+ bool find(const char *RawStackDepot, u32 Hash, uptr *RingPosPtr,
+ uptr *SizePtr) const {
u32 Pos = Hash & TabMask;
- u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
+ u32 RingPos = atomic_load_relaxed(&Tab(RawStackDepot)[Pos]);
if (RingPos >= RingSize)
return false;
- u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
+ u64 Entry = atomic_load_relaxed(&Ring(RawStackDepot)[RingPos]);
u64 HashWithTagBit = (u64(Hash) << 1) | 1;
if ((Entry & 0x1ffffffff) != HashWithTagBit)
return false;
@@ -128,13 +183,13 @@ class StackDepot {
MurMur2HashBuilder B;
for (uptr I = 0; I != Size; ++I) {
RingPos = (RingPos + 1) & RingMask;
- B.add(u32(atomic_load_relaxed(&Ring[RingPos])) >> 2);
+ B.add(u32(atomic_load_relaxed(&Ring(RawStackDepot)[RingPos])) >> 2);
}
return B.get() == Hash;
}
- u64 operator[](uptr RingPos) const {
- return atomic_load_relaxed(&Ring[RingPos & RingMask]);
+ u64 at(const char *RawStackDepot, uptr RingPos) const {
+ return atomic_load_relaxed(&Ring(RawStackDepot)[RingPos & RingMask]);
}
};
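The resulting depot layout, as implied by the Ring()/Tab() accessors above
(the diagram is a sketch, not part of the patch):

    // +---------------------+ <- RawStackDepot
    // | StackDepot header   |    RingSize, RingMask, TabMask, RingEnd, lock
    // +---------------------+ <- + sizeof(StackDepot) (16-aligned)
    // | atomic_u64 Ring[]   |    RingSize elements
    // +---------------------+ <- + sizeof(atomic_u64) * RingSize
    // | atomic_u32 Tab[]    |    TabMask + 1 elements
    // +---------------------+
    //
    // init() records the sizes; isValid() recomputes the expected total
    // and accepts a buffer only if it is exactly header + Ring + Tab.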
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
index 21694c3f17fe5..e9d8c1e8d3db2 100644
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -43,10 +43,9 @@ INTERFACE void __scudo_get_error_info(
const char *stack_depot, size_t stack_depot_size, const char *region_info,
const char *ring_buffer, size_t ring_buffer_size, const char *memory,
const char *memory_tags, uintptr_t memory_addr, size_t memory_size) {
- (void)(stack_depot_size);
- Allocator.getErrorInfo(error_info, fault_addr, stack_depot, region_info,
- ring_buffer, ring_buffer_size, memory, memory_tags,
- memory_addr, memory_size);
+ Allocator.getErrorInfo(error_info, fault_addr, stack_depot, stack_depot_size,
+ region_info, ring_buffer, ring_buffer_size, memory,
+ memory_tags, memory_addr, memory_size);
}
INTERFACE const char *__scudo_get_stack_depot_addr() {
@@ -54,7 +53,7 @@ INTERFACE const char *__scudo_get_stack_depot_addr() {
}
INTERFACE size_t __scudo_get_stack_depot_size() {
- return sizeof(scudo::StackDepot);
+ return Allocator.getStackDepotSize();
}
INTERFACE const char *__scudo_get_region_info_addr() {
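Taken together, a hypothetical out-of-process consumer now looks roughly like
this; AnalyzeCrash and the copied-out buffers are assumptions, and only the
__scudo_* signatures come from this patch:

    #include <scudo/interface.h>

    // Sketch: the raw buffers are assumed to have been copied out of the
    // target process, with DepotSize taken from the target's
    // __scudo_get_stack_depot_size() rather than a compile-time sizeof.
    scudo_error_info AnalyzeCrash(uintptr_t FaultAddr, const char *DepotCopy,
                                  size_t DepotSize, const char *RegionInfo,
                                  const char *RingBuffer,
                                  size_t RingBufferSize, const char *Memory,
                                  const char *MemoryTags,
                                  uintptr_t MemoryAddr, size_t MemorySize) {
      scudo_error_info Info = {};
      // getErrorInfo() now validates the depot via isValid() instead of
      // trusting a fixed-size struct.
      __scudo_get_error_info(&Info, FaultAddr, DepotCopy, DepotSize,
                             RegionInfo, RingBuffer, RingBufferSize, Memory,
                             MemoryTags, MemoryAddr, MemorySize);
      return Info;
    }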