[compiler-rt] f72e509 - [lsan] Reduce StopTheWorld access to StackDepot

Vitaly Buka via llvm-commits llvm-commits at lists.llvm.org
Thu Dec 9 11:43:35 PST 2021


Author: Vitaly Buka
Date: 2021-12-09T11:43:30-08:00
New Revision: f72e50946c713c1e2444abd4a2e3f455ebb1045e

URL: https://github.com/llvm/llvm-project/commit/f72e50946c713c1e2444abd4a2e3f455ebb1045e
DIFF: https://github.com/llvm/llvm-project/commit/f72e50946c713c1e2444abd4a2e3f455ebb1045e.diff

LOG: [lsan] Reduce StopTheWorld access to StackDepot

StackDepot locks some internal state. As is, there is a small probability of a
deadlock if we stop a thread that has locked the Depot.

We need to either lock/unlock the StackDepot around StopTheWorld, or not
interact with the StackDepot from there.

This patch does not run LeakReport under StopTheWorld. LeakReport
contains most of the StackDepot accesses.

As a bonus, this patch will help to resolve the kMaxLeaksConsidered FIXME.

Depends on D114498.

Reviewed By: morehouse, kstoimenov

Differential Revision: https://reviews.llvm.org/D115284

Added: 
    

Modified: 
    compiler-rt/lib/lsan/lsan_common.cpp
    compiler-rt/lib/lsan/lsan_common.h

Removed: 
    


################################################################################
diff  --git a/compiler-rt/lib/lsan/lsan_common.cpp b/compiler-rt/lib/lsan/lsan_common.cpp
index ee20ed2a6780..658415bce507 100644
--- a/compiler-rt/lib/lsan/lsan_common.cpp
+++ b/compiler-rt/lib/lsan/lsan_common.cpp
@@ -636,15 +636,13 @@ static void ResetTagsCb(uptr chunk, void *arg) {
 // a LeakReport.
 static void CollectLeaksCb(uptr chunk, void *arg) {
   CHECK(arg);
-  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
+  LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
   chunk = GetUserBegin(chunk);
   LsanMetadata m(chunk);
   if (!m.allocated())
     return;
-  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
-    leak_report->AddLeakedChunk(chunk, m.stack_trace_id(), m.requested_size(),
-                                m.tag());
-  }
+  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
+    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
 }
 
 void LeakSuppressionContext::PrintMatchedSuppressions() {
@@ -705,7 +703,7 @@ static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
   CHECK(!param->success);
   ReportUnsuspendedThreads(suspended_threads);
   ClassifyAllChunks(suspended_threads, &param->frontier);
-  ForEachChunk(CollectLeaksCb, &param->leak_report);
+  ForEachChunk(CollectLeaksCb, &param->leaks);
   // Clean up for subsequent leak checks. This assumes we did not overwrite any
   // kIgnored tags.
   ForEachChunk(ResetTagsCb, nullptr);
@@ -754,17 +752,20 @@ static bool CheckForLeaks() {
           "etc)\n");
       Die();
     }
+    LeakReport leak_report;
+    leak_report.AddLeakedChunks(param.leaks);
+
     // No new suppressions stacks, so rerun will not help and we can report.
-    if (!param.leak_report.ApplySuppressions())
-      return PrintResults(param.leak_report);
+    if (!leak_report.ApplySuppressions())
+      return PrintResults(leak_report);
 
     // No indirect leaks to report, so we are done here.
-    if (!param.leak_report.IndirectUnsuppressedLeakCount())
-      return PrintResults(param.leak_report);
+    if (!leak_report.IndirectUnsuppressedLeakCount())
+      return PrintResults(leak_report);
 
     if (i >= 8) {
       Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
-      return PrintResults(param.leak_report);
+      return PrintResults(leak_report);
     }
 
     // We found a new previously unseen suppressed call stack. Rerun to make
@@ -801,40 +802,45 @@ void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
 // A hard limit on the number of distinct leaks, to avoid quadratic complexity
 // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
 // in real-world applications.
-// FIXME: Get rid of this limit by changing the implementation of LeakReport to
-// use a hash table.
+// FIXME: Get rid of this limit by moving logic into DedupLeaks.
 const uptr kMaxLeaksConsidered = 5000;
 
-void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
-                                uptr leaked_size, ChunkTag tag) {
-  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
-
-  if (u32 resolution = flags()->resolution) {
-    StackTrace stack = StackDepotGet(stack_trace_id);
-    stack.size = Min(stack.size, resolution);
-    stack_trace_id = StackDepotPut(stack);
-  }
+void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
+  for (const LeakedChunk &leak : chunks) {
+    uptr chunk = leak.chunk;
+    u32 stack_trace_id = leak.stack_trace_id;
+    uptr leaked_size = leak.leaked_size;
+    ChunkTag tag = leak.tag;
+    CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
+
+    if (u32 resolution = flags()->resolution) {
+      StackTrace stack = StackDepotGet(stack_trace_id);
+      stack.size = Min(stack.size, resolution);
+      stack_trace_id = StackDepotPut(stack);
+    }
 
-  bool is_directly_leaked = (tag == kDirectlyLeaked);
-  uptr i;
-  for (i = 0; i < leaks_.size(); i++) {
-    if (leaks_[i].stack_trace_id == stack_trace_id &&
-        leaks_[i].is_directly_leaked == is_directly_leaked) {
-      leaks_[i].hit_count++;
-      leaks_[i].total_size += leaked_size;
-      break;
+    bool is_directly_leaked = (tag == kDirectlyLeaked);
+    uptr i;
+    for (i = 0; i < leaks_.size(); i++) {
+      if (leaks_[i].stack_trace_id == stack_trace_id &&
+          leaks_[i].is_directly_leaked == is_directly_leaked) {
+        leaks_[i].hit_count++;
+        leaks_[i].total_size += leaked_size;
+        break;
+      }
+    }
+    if (i == leaks_.size()) {
+      if (leaks_.size() == kMaxLeaksConsidered)
+        return;
+      Leak leak = {next_id_++,         /* hit_count */ 1,
+                   leaked_size,        stack_trace_id,
+                   is_directly_leaked, /* is_suppressed */ false};
+      leaks_.push_back(leak);
+    }
+    if (flags()->report_objects) {
+      LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
+      leaked_objects_.push_back(obj);
     }
-  }
-  if (i == leaks_.size()) {
-    if (leaks_.size() == kMaxLeaksConsidered)
-      return;
-    Leak leak = {next_id_++,     /* hit_count */ 1,  leaked_size,
-                 stack_trace_id, is_directly_leaked, /* is_suppressed */ false};
-    leaks_.push_back(leak);
-  }
-  if (flags()->report_objects) {
-    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
-    leaked_objects_.push_back(obj);
   }
 }
 

diff  --git a/compiler-rt/lib/lsan/lsan_common.h b/compiler-rt/lib/lsan/lsan_common.h
index 0ceefa274b70..db5e30146f16 100644
--- a/compiler-rt/lib/lsan/lsan_common.h
+++ b/compiler-rt/lib/lsan/lsan_common.h
@@ -82,6 +82,15 @@ extern Flags lsan_flags;
 inline Flags *flags() { return &lsan_flags; }
 void RegisterLsanFlags(FlagParser *parser, Flags *f);
 
+struct LeakedChunk {
+  uptr chunk;
+  u32 stack_trace_id;
+  uptr leaked_size;
+  ChunkTag tag;
+};
+
+using LeakedChunks = InternalMmapVector<LeakedChunk>;
+
 struct Leak {
   u32 id;
   uptr hit_count;
@@ -101,8 +110,7 @@ struct LeakedObject {
 class LeakReport {
  public:
   LeakReport() {}
-  void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
-                      ChunkTag tag);
+  void AddLeakedChunks(const LeakedChunks &chunks);
   void ReportTopLeaks(uptr max_leaks);
   void PrintSummary();
   uptr ApplySuppressions();
@@ -136,7 +144,7 @@ struct RootRegion {
 // threads and enumerating roots.
 struct CheckForLeaksParam {
   Frontier frontier;
-  LeakReport leak_report;
+  LeakedChunks leaks;
   bool success = false;
 };
 


        


More information about the llvm-commits mailing list