[compiler-rt] cc2794a - [sanitizer] Switch StackStore from pointers to 32bit IDs
Vitaly Buka via llvm-commits
llvm-commits at lists.llvm.org
Sun Nov 28 01:44:50 PST 2021
Author: Vitaly Buka
Date: 2021-11-28T01:44:28-08:00
New Revision: cc2794abeab5d876c50523de193ea7b5849018cc
URL: https://github.com/llvm/llvm-project/commit/cc2794abeab5d876c50523de193ea7b5849018cc
DIFF: https://github.com/llvm/llvm-project/commit/cc2794abeab5d876c50523de193ea7b5849018cc.diff
LOG: [sanitizer] Switch StackStore from pointers to 32bit IDs
Depends on D114488.
Reviewed By: morehouse, dvyukov, kstoimenov
Differential Revision: https://reviews.llvm.org/D114489
Added:
Modified:
compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
Removed:
################################################################################
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
index 19f2c1701a386..8f1f95224dba7 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
@@ -37,16 +37,23 @@ StackStore::Id StackStore::Store(const StackTrace &trace) {
if (!trace.size && !trace.tag)
return 0;
StackTraceHeader h(trace);
- uptr *stack_trace = Alloc(h.size + 1);
+ uptr idx;
+ uptr *stack_trace = Alloc(h.size + 1, &idx);
*stack_trace = h.ToUptr();
internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));
- return reinterpret_cast<StackStore::Id>(stack_trace);
+ return OffsetToId(idx);
}
StackTrace StackStore::Load(Id id) const {
if (!id)
return {};
- const uptr *stack_trace = reinterpret_cast<const uptr *>(id);
+ uptr idx = IdToOffset(id);
+ uptr block_idx = GetBlockIdx(idx);
+ CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
+ uptr *stack_trace = blocks_[block_idx].Get();
+ if (!stack_trace)
+ return {};
+ stack_trace += GetInBlockIdx(idx);
StackTraceHeader h(*stack_trace);
return StackTrace(stack_trace + 1, h.size, h.tag);
}
@@ -57,7 +64,7 @@ uptr StackStore::Allocated() const {
sizeof(*this);
}
-uptr *StackStore::Alloc(uptr count) {
+uptr *StackStore::Alloc(uptr count, uptr *idx) {
for (;;) {
    // Optimistic lock-free allocation, essentially try to bump the
// total_frames_.
@@ -66,6 +73,7 @@ uptr *StackStore::Alloc(uptr count) {
if (LIKELY(block_idx == GetBlockIdx(start + count - 1))) {
      // Fits into a single block.
CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
+ *idx = start;
return blocks_[block_idx].GetOrCreate() + GetInBlockIdx(start);
}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
index 389bb48668a14..f1e99d132fe9c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
@@ -25,7 +25,9 @@ class StackStore {
public:
constexpr StackStore() = default;
- using Id = uptr;
+ using Id = u32; // Enough for 2^32 * sizeof(uptr) bytes of traces.
+ static_assert(u64(kBlockCount) * kBlockSizeFrames == 1ull << (sizeof(Id) * 8),
+ "");
Id Store(const StackTrace &trace);
StackTrace Load(Id id) const;
@@ -42,7 +44,19 @@ class StackStore {
return frame_idx % kBlockSizeFrames;
}
- uptr *Alloc(uptr count);
+ static constexpr uptr IdToOffset(Id id) {
+ CHECK_NE(id, 0);
+ return id - 1; // Avoid zero as id.
+ }
+
+ static constexpr uptr OffsetToId(Id id) {
+    // This makes UINT32_MAX to 0 and it will be retrieved as an empty stack.
+ // But this is not a problem as we will not be able to store anything after
+ // that anyway.
+ return id + 1; // Avoid zero as id.
+ }
+
+ uptr *Alloc(uptr count, uptr *idx);
// Total number of allocated frames.
atomic_uintptr_t total_frames_ = {};
@@ -53,9 +67,9 @@ class StackStore {
StaticSpinMutex mtx_; // Protects alloc of new blocks.
uptr *Create();
- uptr *Get() const;
public:
+ uptr *Get() const;
uptr *GetOrCreate();
void TestOnlyUnmap();
};
More information about the llvm-commits
mailing list