[compiler-rt] d591a46 - [NFC][sanitizer] Fix naming in StackStore

Vitaly Buka via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 18 19:39:37 PST 2021


Author: Vitaly Buka
Date: 2021-11-18T19:39:20-08:00
New Revision: d591a46d17aef35d9f17d7d68f5c6c6f8f305b25

URL: https://github.com/llvm/llvm-project/commit/d591a46d17aef35d9f17d7d68f5c6c6f8f305b25
DIFF: https://github.com/llvm/llvm-project/commit/d591a46d17aef35d9f17d7d68f5c6c6f8f305b25.diff

LOG: [NFC][sanitizer] Fix naming in StackStore

Added: 
    

Modified: 
    compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
    compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
    compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp

Removed: 
    


################################################################################
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
index 31a457358d64..ad88e2bbbefc 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.cpp
@@ -5,10 +5,6 @@
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
-//
-// A fast memory allocator that does not support free() nor realloc().
-// All allocations are forever.
-//===----------------------------------------------------------------------===//
 
 #include "sanitizer_stack_store.h"
 
@@ -20,73 +16,73 @@ namespace __sanitizer {
 
 static constexpr u32 kStackSizeBits = 16;
 
-StackStore::Id StackStore::store(const StackTrace &trace) {
-  uptr *stack_trace = alloc(trace.size + 1);
+StackStore::Id StackStore::Store(const StackTrace &trace) {
+  uptr *stack_trace = Alloc(trace.size + 1);
   CHECK_LT(trace.size, 1 << kStackSizeBits);
   *stack_trace = trace.size + (trace.tag << kStackSizeBits);
   internal_memcpy(stack_trace + 1, trace.trace, trace.size * sizeof(uptr));
   return reinterpret_cast<StackStore::Id>(stack_trace);
 }
 
-StackTrace StackStore::load(Id id) {
+StackTrace StackStore::Load(Id id) {
   const uptr *stack_trace = reinterpret_cast<const uptr *>(id);
   uptr size = *stack_trace & ((1 << kStackSizeBits) - 1);
   uptr tag = *stack_trace >> kStackSizeBits;
   return StackTrace(stack_trace + 1, size, tag);
 }
 
-uptr *StackStore::tryAlloc(uptr count) {
+uptr *StackStore::TryAlloc(uptr count) {
   // Optimistic lock-free allocation, essentially try to bump the region ptr.
   for (;;) {
-    uptr cmp = atomic_load(&region_pos, memory_order_acquire);
-    uptr end = atomic_load(&region_end, memory_order_acquire);
+    uptr cmp = atomic_load(&region_pos_, memory_order_acquire);
+    uptr end = atomic_load(&region_end_, memory_order_acquire);
     uptr size = count * sizeof(uptr);
     if (cmp == 0 || cmp + size > end)
       return nullptr;
-    if (atomic_compare_exchange_weak(&region_pos, &cmp, cmp + size,
+    if (atomic_compare_exchange_weak(&region_pos_, &cmp, cmp + size,
                                      memory_order_acquire))
       return reinterpret_cast<uptr *>(cmp);
   }
 }
 
-uptr *StackStore::alloc(uptr count) {
+uptr *StackStore::Alloc(uptr count) {
   // First, try to allocate optimistically.
-  uptr *s = tryAlloc(count);
+  uptr *s = TryAlloc(count);
   if (LIKELY(s))
     return s;
-  return refillAndAlloc(count);
+  return RefillAndAlloc(count);
 }
 
-uptr *StackStore::refillAndAlloc(uptr count) {
+uptr *StackStore::RefillAndAlloc(uptr count) {
   // If that failed, lock, retry and allocate a new superblock.
-  SpinMutexLock l(&mtx);
+  SpinMutexLock l(&mtx_);
   for (;;) {
-    uptr *s = tryAlloc(count);
+    uptr *s = TryAlloc(count);
     if (s)
       return s;
-    atomic_store(&region_pos, 0, memory_order_relaxed);
+    atomic_store(&region_pos_, 0, memory_order_relaxed);
     uptr size = count * sizeof(uptr) + sizeof(BlockInfo);
     uptr allocsz = RoundUpTo(Max<uptr>(size, 64u * 1024u), GetPageSizeCached());
     uptr mem = (uptr)MmapOrDie(allocsz, "stack depot");
     BlockInfo *new_block = (BlockInfo *)(mem + allocsz) - 1;
-    new_block->next = curr;
+    new_block->next = curr_;
     new_block->ptr = mem;
     new_block->size = allocsz;
-    curr = new_block;
+    curr_ = new_block;
 
-    atomic_fetch_add(&mapped_size, allocsz, memory_order_relaxed);
+    atomic_fetch_add(&mapped_size_, allocsz, memory_order_relaxed);
 
     allocsz -= sizeof(BlockInfo);
-    atomic_store(&region_end, mem + allocsz, memory_order_release);
-    atomic_store(&region_pos, mem, memory_order_release);
+    atomic_store(&region_end_, mem + allocsz, memory_order_release);
+    atomic_store(&region_pos_, mem, memory_order_release);
   }
 }
 
 void StackStore::TestOnlyUnmap() {
-  while (curr) {
-    uptr mem = curr->ptr;
-    uptr allocsz = curr->size;
-    curr = curr->next;
+  while (curr_) {
+    uptr mem = curr_->ptr;
+    uptr allocsz = curr_->size;
+    curr_ = curr_->next;
     UnmapOrDie((void *)mem, allocsz);
   }
   internal_memset(this, 0, sizeof(*this));

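For reference, the renamed Alloc/TryAlloc/RefillAndAlloc methods keep the
same lock-free fast path: TryAlloc bumps region_pos_ with a compare-exchange
and only falls back to the mutex-guarded RefillAndAlloc when the current
region is exhausted. A minimal standalone sketch of that optimistic
bump-pointer pattern, using std::atomic rather than the sanitizer's own
atomics (BumpRegion is a hypothetical name, not part of this patch):

  #include <atomic>
  #include <cstdint>

  struct BumpRegion {
    std::atomic<uintptr_t> pos{0};  // next free byte; 0 = no region yet
    std::atomic<uintptr_t> end{0};  // one past the last usable byte

    // Lock-free fast path: claim `count` words by bumping `pos` with a CAS.
    uintptr_t *TryAlloc(uintptr_t count) {
      for (;;) {
        uintptr_t cmp = pos.load(std::memory_order_acquire);
        uintptr_t e = end.load(std::memory_order_acquire);
        uintptr_t size = count * sizeof(uintptr_t);
        if (cmp == 0 || cmp + size > e)
          return nullptr;  // region exhausted; caller refills under a lock
        if (pos.compare_exchange_weak(cmp, cmp + size,
                                      std::memory_order_acquire))
          return reinterpret_cast<uintptr_t *>(cmp);
        // CAS lost to another thread; cmp was refreshed, so just retry.
      }
    }
  };
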
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
index b26bca9c2c79..b5bbdccc20b1 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stack_store.h
@@ -5,10 +5,6 @@
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
 //
 //===----------------------------------------------------------------------===//
-//
-// A fast memory allocator that does not support free() nor realloc().
-// All allocations are forever.
-//===----------------------------------------------------------------------===//
 
 #ifndef SANITIZER_STACK_STORE_H
 #define SANITIZER_STACK_STORE_H
@@ -26,27 +22,27 @@ class StackStore {
 
   using Id = uptr;
 
-  Id store(const StackTrace &trace);
-  StackTrace load(Id id);
-  uptr allocated() const { return atomic_load_relaxed(&mapped_size); }
+  Id Store(const StackTrace &trace);
+  StackTrace Load(Id id);
+  uptr Allocated() const { return atomic_load_relaxed(&mapped_size_); }
 
   void TestOnlyUnmap();
 
  private:
-  uptr *alloc(uptr count = 1);
-  uptr *tryAlloc(uptr count);
-  uptr *refillAndAlloc(uptr count);
-  mutable StaticSpinMutex mtx = {};  // Protects alloc of new blocks.
-  atomic_uintptr_t region_pos = {};  // Region allocator for Node's.
-  atomic_uintptr_t region_end = {};
-  atomic_uintptr_t mapped_size = {};
+  uptr *Alloc(uptr count = 1);
+  uptr *TryAlloc(uptr count);
+  uptr *RefillAndAlloc(uptr count);
+  mutable StaticSpinMutex mtx_ = {};  // Protects alloc of new blocks.
+  atomic_uintptr_t region_pos_ = {};  // Region allocator for Nodes.
+  atomic_uintptr_t region_end_ = {};
+  atomic_uintptr_t mapped_size_ = {};
 
   struct BlockInfo {
     const BlockInfo *next;
     uptr ptr;
     uptr size;
   };
-  const BlockInfo *curr = nullptr;
+  const BlockInfo *curr_ = nullptr;
 };
 
 }  // namespace __sanitizer

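With the header change in place, call sites read as below. A hedged
caller-side sketch of the renamed public interface (Example is a
hypothetical function; the real callers are in sanitizer_stackdepot.cpp):

  #include "sanitizer_stack_store.h"

  namespace __sanitizer {

  void Example(StackStore &store, const StackTrace &trace) {
    StackStore::Id id = store.Store(trace);  // was store.store(trace)
    StackTrace copy = store.Load(id);        // was store.load(id)
    uptr used = store.Allocated();           // was store.allocated()
    (void)copy;
    (void)used;
  }

  }  // namespace __sanitizer
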
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
index f108ae22a5ec..e203b2cc4c89 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_stackdepot.cpp
@@ -73,20 +73,20 @@ void StackDepotHandle::inc_use_count_unsafe() {
 }
 
 uptr StackDepotNode::allocated() {
-  return stackStore.allocated() + storeIds.MemoryUsage() +
+  return stackStore.Allocated() + storeIds.MemoryUsage() +
          useCounts.MemoryUsage();
 }
 
 void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
   stack_hash = hash;
-  storeIds[id] = stackStore.store(args);
+  storeIds[id] = stackStore.Store(args);
 }
 
 StackDepotNode::args_type StackDepotNode::load(u32 id) const {
   StackStore::Id store_id = storeIds[id];
   if (!store_id)
     return {};
-  return stackStore.load(store_id);
+  return stackStore.Load(store_id);
 }
 
 StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }
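
A side note on the record layout visible in Store/Load earlier: the first
word of each stored trace packs the size into the low kStackSizeBits bits
and the tag into the bits above them, which is why Store checks
CHECK_LT(trace.size, 1 << kStackSizeBits). A worked sketch of that encoding
(the Pack/Unpack helpers are hypothetical, for illustration only):

  #include <cstdint>

  constexpr unsigned kStackSizeBits = 16;  // trace sizes must stay < 1 << 16

  constexpr uintptr_t Pack(uintptr_t size, uintptr_t tag) {
    return size + (tag << kStackSizeBits);  // tag in high bits, size in low
  }
  constexpr uintptr_t UnpackSize(uintptr_t word) {
    return word & ((uintptr_t(1) << kStackSizeBits) - 1);
  }
  constexpr uintptr_t UnpackTag(uintptr_t word) {
    return word >> kStackSizeBits;
  }

  // E.g. a 3-frame trace with tag 1 stores the word 0x10003.
  static_assert(Pack(3, 1) == 0x10003, "");
  static_assert(UnpackSize(0x10003) == 3, "");
  static_assert(UnpackTag(0x10003) == 1, "");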
