[compiler-rt] a06d352 - [NFC][sanitizer] Track progress of populating the block

Vitaly Buka via llvm-commits llvm-commits at lists.llvm.org
Mon Nov 29 18:44:47 PST 2021


Author: Vitaly Buka
Date: 2021-11-29T18:44:39-08:00
New Revision: a06d3527563503f17794bf119ee471d0ca2669ca

URL: https://github.com/llvm/llvm-project/commit/a06d3527563503f17794bf119ee471d0ca2669ca
DIFF: https://github.com/llvm/llvm-project/commit/a06d3527563503f17794bf119ee471d0ca2669ca.diff

LOG: [NFC][sanitizer] Track progress of populating the block

In a multi-threaded application, concurrent StackStore::Store calls may
finish in an order different from their assigned Ids, so we can't assume
that once we switch to writing the next block, the previous one is done.

The workaround is to count the exact number of uptrs stored into each
block, including the skipped tail/head slots that could not fit an
entire trace.
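
A minimal sketch of the counting scheme, using std::atomic in place of the
sanitizer's own atomics (names and the capacity constant here are
illustrative; the real implementation is in the diff below):

    #include <atomic>
    #include <cstdint>

    // Illustrative capacity; the real constant lives in
    // sanitizer_stack_store.h.
    constexpr uintptr_t kBlockSizeFrames = 1024 * 1024;

    struct BlockInfo {
      std::atomic<uintptr_t> stored{0};

      // Credit n freshly written (or deliberately skipped) frame slots to
      // this block. Returns true once the cumulative count reaches the
      // block capacity, i.e. every slot is accounted for and the block is
      // safe to pack. Stored(0) acts as a pure query, which the new test
      // below exploits.
      bool Stored(uintptr_t n) {
        return stored.fetch_add(n, std::memory_order_release) + n ==
               kBlockSizeFrames;
      }
    };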

Depends on D114490.

Reviewed By: morehouse

Differential Revision: https://reviews.llvm.org/D114493

Added: 
    

Modified: 
    compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
    compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
    compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
    compiler-rt/lib/sanitizer_common/tests/sanitizer_stack_store_test.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
index 54b1141ced1d0..0f880fda511ca 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
@@ -33,14 +33,16 @@ struct StackTraceHeader {
 };
 }  // namespace
 
-StackStore::Id StackStore::Store(const StackTrace &trace) {
+StackStore::Id StackStore::Store(const StackTrace &trace, uptr *pack) {
   if (!trace.size && !trace.tag)
     return 0;
   StackTraceHeader h(trace);
-  uptr idx;
-  uptr *stack_trace = Alloc(h.size + 1, &idx);
+  uptr idx = 0;
+  *pack = 0;
+  uptr *stack_trace = Alloc(h.size + 1, &idx, pack);
   *stack_trace = h.ToUptr();
   internal_memcpy(stack_trace + 1, trace.trace, h.size * sizeof(uptr));
+  *pack += blocks_[GetBlockIdx(idx)].Stored(h.size + 1);
   return OffsetToId(idx);
 }
 
@@ -64,13 +66,14 @@ uptr StackStore::Allocated() const {
          sizeof(*this);
 }
 
-uptr *StackStore::Alloc(uptr count, uptr *idx) {
+uptr *StackStore::Alloc(uptr count, uptr *idx, uptr *pack) {
   for (;;) {
    // Optimistic lock-free allocation, essentially try to bump the
     // total_frames_.
     uptr start = atomic_fetch_add(&total_frames_, count, memory_order_relaxed);
     uptr block_idx = GetBlockIdx(start);
-    if (LIKELY(block_idx == GetBlockIdx(start + count - 1))) {
+    uptr last_idx = GetBlockIdx(start + count - 1);
+    if (LIKELY(block_idx == last_idx)) {
      // Fits into a single block.
       CHECK_LT(block_idx, ARRAY_SIZE(blocks_));
       *idx = start;
@@ -78,9 +81,19 @@ uptr *StackStore::Alloc(uptr count, uptr *idx) {
     }
 
    // Retry. We can't use a range allocated in two different blocks.
+    CHECK_LE(count, kBlockSizeFrames);
+    uptr in_first = kBlockSizeFrames - GetInBlockIdx(start);
+    // Mark tail/head of these blocks as "stored" to avoid waiting before we can
+    // Pack().
+    *pack += blocks_[block_idx].Stored(in_first);
+    *pack += blocks_[last_idx].Stored(count - in_first);
   }
 }
 
+void StackStore::Pack() {
+  // TODO
+}
+
 void StackStore::TestOnlyUnmap() {
   for (BlockInfo &b : blocks_) b.TestOnlyUnmap();
   internal_memset(this, 0, sizeof(*this));
@@ -116,4 +129,9 @@ void StackStore::BlockInfo::TestOnlyUnmap() {
     UnmapOrDie(ptr, StackStore::kBlockSizeBytes);
 }
 
+bool StackStore::BlockInfo::Stored(uptr n) {
+  return n + atomic_fetch_add(&stored_, n, memory_order_release) ==
+         kBlockSizeFrames;
+}
+
 }  // namespace __sanitizer

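For intuition on the retry path in Alloc() above: when a bump allocation of
count frames would straddle a block boundary, the wasted tail of the first
block and the wasted head of the next are still credited as stored, so both
counters can reach capacity. A worked example with illustrative numbers,
assuming GetInBlockIdx() is the in-block remainder (its body is not shown
in this diff):

    #include <cassert>
    #include <cstdint>

    // Illustrative constants; the real ones live in
    // sanitizer_stack_store.h.
    constexpr uintptr_t kBlockSizeFrames = 1024 * 1024;
    constexpr uintptr_t GetInBlockIdx(uintptr_t frame_idx) {
      return frame_idx % kBlockSizeFrames;
    }

    int main() {
      // A bump allocation of 10 frames lands 3 slots before a block
      // boundary, so the trace cannot fit in the current block.
      uintptr_t start = kBlockSizeFrames - 3;
      uintptr_t count = 10;
      uintptr_t in_first = kBlockSizeFrames - GetInBlockIdx(start);
      assert(in_first == 3);          // wasted tail of the first block
      assert(count - in_first == 7);  // wasted head of the next block
      // Both wasted ranges are credited via Stored(); without this, the
      // first block's counter could never reach kBlockSizeFrames and the
      // block would never become packable.
      return 0;
    }
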
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
index f1e99d132fe9c..a5b457b06441d 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
@@ -29,13 +29,17 @@ class StackStore {
   static_assert(u64(kBlockCount) * kBlockSizeFrames == 1ull << (sizeof(Id) * 8),
                 "");
 
-  Id Store(const StackTrace &trace);
+  Id Store(const StackTrace &trace,
+           uptr *pack /* number of blocks completed by this call */);
   StackTrace Load(Id id) const;
   uptr Allocated() const;
 
+  void Pack();
+
   void TestOnlyUnmap();
 
  private:
+  friend class StackStoreTest;
   static constexpr uptr GetBlockIdx(uptr frame_idx) {
     return frame_idx / kBlockSizeFrames;
   }
@@ -56,7 +60,7 @@ class StackStore {
     return id + 1;  // Avoid zero as id.
   }
 
-  uptr *Alloc(uptr count, uptr *idx);
+  uptr *Alloc(uptr count, uptr *idx, uptr *pack);
 
   // Total number of allocated frames.
   atomic_uintptr_t total_frames_ = {};
@@ -64,7 +68,10 @@ class StackStore {
   // Each block will hold pointer to exactly kBlockSizeFrames.
   class BlockInfo {
     atomic_uintptr_t data_;
-    StaticSpinMutex mtx_;  // Protects alloc of new blocks.
+    // Counter to track store progress to know when we can Pack() the block.
+    atomic_uint32_t stored_;
+    // Protects alloc of new blocks.
+    StaticSpinMutex mtx_;
 
     uptr *Create();
 
@@ -72,6 +79,7 @@ class StackStore {
     uptr *Get() const;
     uptr *GetOrCreate();
     void TestOnlyUnmap();
+    bool Stored(uptr n);
   };
   BlockInfo blocks_[kBlockCount] = {};
 };

diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
index 8e1832f073983..2579c1b3e3395 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -74,7 +74,8 @@ uptr StackDepotNode::allocated() {
 
 void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
   stack_hash = hash;
-  store_id = stackStore.Store(args);
+  uptr pack = 0;
+  store_id = stackStore.Store(args, &pack);
 }
 
 StackDepotNode::args_type StackDepotNode::load(u32 id) const {

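Note that in this change StackDepotNode::store() computes pack but does not
yet act on it: StackStore::Pack() is still a TODO above. A hypothetical
future wiring (not part of this commit) might look like:

    uptr pack = 0;
    store_id = stackStore.Store(args, &pack);
    if (pack)             // this call completed `pack` full blocks
      stackStore.Pack();  // compaction; unimplemented in this commit
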
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_stack_store_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_stack_store_test.cpp
index c7cb041482b6f..6b11ada6eb05c 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_stack_store_test.cpp
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_stack_store_test.cpp
@@ -12,6 +12,7 @@
 #include <vector>
 
 #include "gtest/gtest.h"
+#include "sanitizer_atomic.h"
 #include "sanitizer_hash.h"
 #include "sanitizer_stacktrace.h"
 
@@ -40,19 +41,39 @@ class StackStoreTest : public testing::Test {
     };
   }
 
+  using BlockInfo = StackStore::BlockInfo;
+
+  uptr GetTotalFramesCount() const {
+    return atomic_load_relaxed(&store_.total_frames_);
+  }
+
+  uptr CountReadyToPackBlocks() {
+    uptr res = 0;
+    for (BlockInfo& b : store_.blocks_) res += b.Stored(0);
+    return res;
+  }
+
+  uptr IdToOffset(StackStore::Id id) const { return store_.IdToOffset(id); }
+
+  static constexpr uptr kBlockSizeFrames = StackStore::kBlockSizeFrames;
+
   StackStore store_ = {};
 };
 
 TEST_F(StackStoreTest, Empty) {
   uptr before = store_.Allocated();
-  EXPECT_EQ(0u, store_.Store({}));
+  uptr pack = 0;
+  EXPECT_EQ(0u, store_.Store({}, &pack));
   uptr after = store_.Allocated();
   EXPECT_EQ(before, after);
 }
 
 TEST_F(StackStoreTest, Basic) {
   std::vector<StackStore::Id> ids;
-  ForEachTrace([&](const StackTrace& s) { ids.push_back(store_.Store(s)); });
+  ForEachTrace([&](const StackTrace& s) {
+    uptr pack = 0;
+    ids.push_back(store_.Store(s, &pack));
+  });
 
   auto id = ids.begin();
   ForEachTrace([&](const StackTrace& s) {
@@ -67,11 +88,35 @@ TEST_F(StackStoreTest, Basic) {
 TEST_F(StackStoreTest, Allocated) {
   EXPECT_LE(store_.Allocated(), 0x100000u);
   std::vector<StackStore::Id> ids;
-  ForEachTrace([&](const StackTrace& s) { ids.push_back(store_.Store(s)); });
+  ForEachTrace([&](const StackTrace& s) {
+    uptr pack = 0;
+    ids.push_back(store_.Store(s, &pack));
+  });
   EXPECT_NEAR(store_.Allocated(), FIRST_32_SECOND_64(500000000u, 1000000000u),
               FIRST_32_SECOND_64(50000000u, 100000000u));
   store_.TestOnlyUnmap();
   EXPECT_LE(store_.Allocated(), 0x100000u);
 }
 
+TEST_F(StackStoreTest, ReadyToPack) {
+  uptr next_pack = kBlockSizeFrames;
+  uptr total_ready = 0;
+  ForEachTrace(
+      [&](const StackTrace& s) {
+        uptr pack = 0;
+        StackStore::Id id = store_.Store(s, &pack);
+        uptr end_idx = IdToOffset(id) + 1 + s.size;
+        if (end_idx >= next_pack) {
+          EXPECT_EQ(1u, pack);
+          next_pack += kBlockSizeFrames;
+        } else {
+          EXPECT_EQ(0u, pack);
+        }
+        total_ready += pack;
+        EXPECT_EQ(CountReadyToPackBlocks(), total_ready);
+      },
+      100000);
+  EXPECT_EQ(GetTotalFramesCount() / kBlockSizeFrames, total_ready);
+}
+
 }  // namespace __sanitizer
