[compiler-rt] [scudo] [MTE] resize stack depot for allocation ring buffer (PR #74515)

via llvm-commits llvm-commits at lists.llvm.org
Wed Dec 13 15:24:56 PST 2023


================
@@ -62,47 +63,103 @@ class StackDepot {
   // This is achieved by re-checking the hash of the stack trace before
   // returning the trace.
 
-#if SCUDO_SMALL_STACK_DEPOT
-  static const uptr TabBits = 4;
-#else
-  static const uptr TabBits = 16;
-#endif
-  static const uptr TabSize = 1 << TabBits;
-  static const uptr TabMask = TabSize - 1;
-  atomic_u32 Tab[TabSize] = {};
-
-#if SCUDO_SMALL_STACK_DEPOT
-  static const uptr RingBits = 4;
-#else
-  static const uptr RingBits = 19;
-#endif
-  static const uptr RingSize = 1 << RingBits;
-  static const uptr RingMask = RingSize - 1;
-  atomic_u64 Ring[RingSize] = {};
+  uptr RingSize = 0;
+  uptr RingMask = 0;
+  uptr TabMask = 0;
+  // This is immediately followed by RingSize atomic_u64 and
+  // (TabMask + 1) atomic_u32.
+
+  atomic_u64 *Ring(char *RawStackDepot) {
+    return reinterpret_cast<atomic_u64 *>(RawStackDepot + sizeof(StackDepot));
+  }
+
+  atomic_u32 *Tab(char *RawStackDepot) {
+    return reinterpret_cast<atomic_u32 *>(RawStackDepot + sizeof(StackDepot) +
+                                          sizeof(atomic_u64) * RingSize);
+  }
+
+  const atomic_u64 *Ring(const char *RawStackDepot) const {
+    return reinterpret_cast<const atomic_u64 *>(RawStackDepot +
+                                                sizeof(StackDepot));
+  }
+
+  const atomic_u32 *Tab(const char *RawStackDepot) const {
+    return reinterpret_cast<const atomic_u32 *>(
+        RawStackDepot + sizeof(StackDepot) + sizeof(atomic_u64) * RingSize);
+  }
 
 public:
+  void init(uptr RingSz, uptr TabSz) {
+    DCHECK(isPowerOfTwo(RingSz));
+    DCHECK(isPowerOfTwo(TabSz));
+    RingSize = RingSz;
+    RingMask = RingSz - 1;
+    TabMask = TabSz - 1;
+  }
+
+  // Ensure that RingSize, RingMask and TabMask are set up in a way that
+  // all accesses are within range of BufSize.
+  bool isValid(uptr BufSize) const {
+    if (RingSize > UINTPTR_MAX / sizeof(atomic_u64)) {
+      return false;
+    }
+    if (RingSize == 0 || !isPowerOfTwo(RingSize)) {
+      return false;
+    }
+    uptr RingBytes = sizeof(atomic_u64) * RingSize;
+    if (RingMask + 1 != RingSize) {
+      return false;
+    }
+
+    if (TabMask == 0) {
+      return false;
+    }
+    if ((TabMask - 1) > UINTPTR_MAX / sizeof(atomic_u32)) {
+      return false;
+    }
+    uptr TabSize = TabMask + 1;
+    if (!isPowerOfTwo(TabSize)) {
+      return false;
+    }
+    uptr TabBytes = sizeof(atomic_u32) * TabSize;
+
+    // Subtract and detect underflow.
+    if (BufSize < sizeof(StackDepot)) {
+      return false;
+    }
+    BufSize -= sizeof(StackDepot);
+    if (BufSize < TabBytes) {
+      return false;
+    }
+    BufSize -= TabBytes;
+    if (BufSize < RingBytes) {
+      return false;
+    }
+    return BufSize == RingBytes;
+  }
+
   // Insert hash of the stack trace [Begin, End) into the stack depot, and
   // return the hash.
-  u32 insert(uptr *Begin, uptr *End) {
+  u32 insert(char *RawStackDepot, uptr *Begin, uptr *End) {
     MurMur2HashBuilder B;
     for (uptr *I = Begin; I != End; ++I)
       B.add(u32(*I) >> 2);
     u32 Hash = B.get();
 
     u32 Pos = Hash & TabMask;
-    u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
-    u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
+    u32 RingPos = atomic_load_relaxed(&Tab(RawStackDepot)[Pos]);
+    u64 Entry = atomic_load_relaxed(&Ring(RawStackDepot)[RingPos]);
----------------
ChiaHungDuan wrote:

```suggestion
   atomic_u32 *Tab = getTab(RawStackDepot);
   atomic_u64 *Ring = getRing(RawStackDepot);
   u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
   u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
   
   // And the following uses of Tab/Ring
```
`getRing`/`getTab` may be better names.
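
Applied to the whole function, the suggestion would read roughly like this (a sketch assuming the proposed `getRing`/`getTab` rename, which does not exist in the PR as posted; the tail of the function is elided):

```cpp
u32 insert(char *RawStackDepot, uptr *Begin, uptr *End) {
  // Hoist the accessors once; every later access goes through the locals.
  atomic_u64 *Ring = getRing(RawStackDepot);
  atomic_u32 *Tab = getTab(RawStackDepot);

  MurMur2HashBuilder B;
  for (uptr *I = Begin; I != End; ++I)
    B.add(u32(*I) >> 2);
  u32 Hash = B.get();

  u32 Pos = Hash & TabMask;
  u32 RingPos = atomic_load_relaxed(&Tab[Pos]);
  u64 Entry = atomic_load_relaxed(&Ring[RingPos]);
  // ... the remaining loads/stores in this function go through the same
  // Tab/Ring locals; the lookup/insert logic is elided here ...
  return Hash;
}
```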

https://github.com/llvm/llvm-project/pull/74515
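
Stepping back from the naming question: the diff replaces the compile-time `RingBits`/`TabBits` arrays with one flat, caller-provided buffer laid out as the `StackDepot` header, immediately followed by `RingSize` ring entries and `TabMask + 1` table slots. A self-contained sketch of the size arithmetic that `init()` and `isValid()` together enforce (`DepotHeader` and `depotBufferSize` are stand-in names for illustration, not from the PR):

```cpp
#include <atomic>
#include <cstddef>
#include <cstdint>
#include <cstdio>

using uptr = std::uintptr_t;
using atomic_u32 = std::atomic<std::uint32_t>;
using atomic_u64 = std::atomic<std::uint64_t>;

// Stand-in for the three header fields the diff adds to StackDepot.
struct DepotHeader {
  uptr RingSize, RingMask, TabMask;
};

// Exact buffer size the layout needs: the header, then RingSize atomic_u64,
// then (TabMask + 1) atomic_u32. isValid() accepts a buffer of exactly this
// size via its chain of checked subtractions.
uptr depotBufferSize(uptr RingSize, uptr TabSize) {
  return sizeof(DepotHeader) + sizeof(atomic_u64) * RingSize +
         sizeof(atomic_u32) * TabSize;
}

int main() {
  const uptr RingSize = uptr(1) << 19; // the former RingBits default
  const uptr TabSize = uptr(1) << 16;  // the former TabBits default
  std::printf("required buffer: %zu bytes\n",
              static_cast<std::size_t>(depotBufferSize(RingSize, TabSize)));
  // On a typical LP64 target: 24 + 8 * (1 << 19) + 4 * (1 << 16) = 4456472.
  return 0;
}
```

Since none of these quantities is a compile-time constant anymore, the depot can be sized at runtime to match the allocation ring buffer, which is the point of the PR; `isValid()` then rejects any header whose fields do not account for exactly the bytes that were mapped.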

