[compiler-rt] bb50e97 - [NFC][sanitizer] Change StackStore API to use StackTrace
Vitaly Buka via llvm-commits
llvm-commits at lists.llvm.org
Thu Nov 18 18:54:01 PST 2021
Author: Vitaly Buka
Date: 2021-11-18T18:53:51-08:00
New Revision: bb50e97103360983c18639e258203aa1fdab90ff
URL: https://github.com/llvm/llvm-project/commit/bb50e97103360983c18639e258203aa1fdab90ff
DIFF: https://github.com/llvm/llvm-project/commit/bb50e97103360983c18639e258203aa1fdab90ff.diff
LOG: [NFC][sanitizer] Change StackStore API to use StackTrace
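The StackStore interface now takes and returns StackTrace objects: store() copies the frames behind an opaque StackStore::Id, and load() reconstructs the trace from the packed header word. A minimal round-trip sketch (hypothetical test code, not part of this commit; assumes compilation inside sanitizer_common, where these headers and the CHECK_EQ macro are available):

    #include "sanitizer_common.h"       // CHECK_EQ
    #include "sanitizer_stack_store.h"
    #include "sanitizer_stacktrace.h"

    using namespace __sanitizer;

    static StackStore test_store;  // zero-initialized static, like the depot's stackStore

    void RoundTrip() {
      uptr pcs[] = {0x401000, 0x401020, 0x401040};
      StackTrace trace(pcs, 3);                      // three frames, default tag
      StackStore::Id id = test_store.store(trace);   // copies frames into the store
      StackTrace loaded = test_store.load(id);       // decodes size/tag, points at the copy
      CHECK_EQ(loaded.size, trace.size);
      CHECK_EQ(loaded.trace[0], trace.trace[0]);
    }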
Added:
Modified:
compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
Removed:
################################################################################
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
index 33ce0c34f77c..31a457358d64 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
@@ -14,8 +14,27 @@
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
+#include "sanitizer_stacktrace.h"
+
namespace __sanitizer {
+static constexpr u32 kStackSizeBits = 16;
+
+StackStore::Id StackStore::store(const StackTrace &trace) {
+ uptr *stack_trace = alloc(trace.size + 1);
+ CHECK_LT(trace.size, 1 << kStackSizeBits);
+ *stack_trace = trace.size + (trace.tag << kStackSizeBits);
+ internal_memcpy(stack_trace + 1, trace.trace, trace.size * sizeof(uptr));
+ return reinterpret_cast<StackStore::Id>(stack_trace);
+}
+
+StackTrace StackStore::load(Id id) {
+ const uptr *stack_trace = reinterpret_cast<const uptr *>(id);
+ uptr size = *stack_trace & ((1 << kStackSizeBits) - 1);
+ uptr tag = *stack_trace >> kStackSizeBits;
+ return StackTrace(stack_trace + 1, size, tag);
+}
+
uptr *StackStore::tryAlloc(uptr count) {
// Optimistic lock-free allocation: essentially, try to bump the region ptr.
for (;;) {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
index a23af375e5a9..604e9fbf34d6 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
@@ -16,17 +16,22 @@
#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_mutex.h"
+#include "sanitizer_stacktrace.h"
namespace __sanitizer {
class StackStore {
public:
- uptr *alloc(uptr count = 1);
+ using Id = uptr;
+
+ Id store(const StackTrace &trace);
+ StackTrace load(Id id);
uptr allocated() const { return atomic_load_relaxed(&mapped_size); }
void TestOnlyUnmap();
private:
+ uptr *alloc(uptr count = 1);
uptr *tryAlloc(uptr count);
uptr *refillAndAlloc(uptr count);
mutable StaticSpinMutex mtx; // Protects alloc of new blocks.
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
index 65fd1f9e3ca8..f108ae22a5ec 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -19,15 +19,12 @@
namespace __sanitizer {
-static StackStore stackStore;
-
struct StackDepotNode {
using hash_type = u64;
hash_type stack_hash;
u32 link;
static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
- static const u32 kStackSizeBits = 16;
typedef StackTrace args_type;
bool eq(hash_type hash, const args_type &args) const {
@@ -50,14 +47,17 @@ struct StackDepotNode {
typedef StackDepotHandle handle_type;
};
+static StackStore stackStore;
+
// FIXME(dvyukov): this single reserved bit is used in TSan.
typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
StackDepot;
static StackDepot theDepot;
// Keep rarely accessed stack traces out of frequently accessed nodes to improve
// caching efficiency.
-static TwoLevelMap<uptr *, StackDepot::kNodesSize1, StackDepot::kNodesSize2>
- tracePtrs;
+static TwoLevelMap<StackStore::Id, StackDepot::kNodesSize1,
+ StackDepot::kNodesSize2>
+ storeIds;
// Keep mutable data out of frequently accessed nodes to improve caching
// efficiency.
static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
@@ -73,26 +73,20 @@ void StackDepotHandle::inc_use_count_unsafe() {
}
uptr StackDepotNode::allocated() {
- return stackStore.allocated() + tracePtrs.MemoryUsage() +
+ return stackStore.allocated() + storeIds.MemoryUsage() +
useCounts.MemoryUsage();
}
void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
stack_hash = hash;
- uptr *stack_trace = stackStore.alloc(args.size + 1);
- CHECK_LT(args.size, 1 << kStackSizeBits);
- *stack_trace = args.size + (args.tag << kStackSizeBits);
- internal_memcpy(stack_trace + 1, args.trace, args.size * sizeof(uptr));
- tracePtrs[id] = stack_trace;
+ storeIds[id] = stackStore.store(args);
}
StackDepotNode::args_type StackDepotNode::load(u32 id) const {
- const uptr *stack_trace = tracePtrs[id];
- if (!stack_trace)
+ StackStore::Id store_id = storeIds[id];
+ if (!store_id)
return {};
- uptr size = *stack_trace & ((1 << kStackSizeBits) - 1);
- uptr tag = *stack_trace >> kStackSizeBits;
- return args_type(stack_trace + 1, size, tag);
+ return stackStore.load(store_id);
}
StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }
@@ -127,7 +121,7 @@ StackDepotHandle StackDepotNode::get_handle(u32 id) {
void StackDepotTestOnlyUnmap() {
theDepot.TestOnlyUnmap();
- tracePtrs.TestOnlyUnmap();
+ storeIds.TestOnlyUnmap();
stackStore.TestOnlyUnmap();
}
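For reference, the header word that store() writes and load() decodes packs the frame count into the low kStackSizeBits (16) bits and the tag into the bits above, which is why store() checks trace.size against 1 << kStackSizeBits. A standalone sketch of the arithmetic (plain C++ with uintptr_t standing in for uptr; illustrative only, not part of this commit):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kStackSizeBits = 16;

    // Mirrors: *stack_trace = trace.size + (trace.tag << kStackSizeBits);
    uintptr_t Pack(uintptr_t size, uintptr_t tag) {
      return size + (tag << kStackSizeBits);  // requires size < (1 << kStackSizeBits)
    }

    int main() {
      uintptr_t header = Pack(/*size=*/3, /*tag=*/1);
      assert((header & ((1 << kStackSizeBits) - 1)) == 3);  // low bits hold the size
      assert((header >> kStackSizeBits) == 1);              // high bits hold the tag
      return 0;
    }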