[Lldb-commits] [lldb] b42a816 - Convert ThreadPlanStack's mutex to a shared mutex. (#116438)

via lldb-commits lldb-commits at lists.llvm.org
Mon Nov 18 13:23:23 PST 2024


Author: jimingham
Date: 2024-11-18T13:23:17-08:00
New Revision: b42a81631491571c4b78d095917ebdddee69b04f

URL: https://github.com/llvm/llvm-project/commit/b42a81631491571c4b78d095917ebdddee69b04f
DIFF: https://github.com/llvm/llvm-project/commit/b42a81631491571c4b78d095917ebdddee69b04f.diff

LOG: Convert ThreadPlanStack's mutex to a shared mutex. (#116438)

I have some reports of A/B inversion deadlocks between the
ThreadPlanStack and the StackFrameList accesses. There's a fair bit of
reasonable code in lldb that does "while accessing the ThreadPlanStack,
look at that thread's StackFrameList", and also plenty of "while
accessing the StackFrameList, look at the ThreadPlanStack."

In all the cases I've seen so far, at most one of the locks taken was
trying to mutate the list; the others were just reading. So we can
solve the deadlock by converting the two mutexes over to shared
mutexes.
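
For anyone unfamiliar with the pattern, here is a minimal sketch of why
the inversion is harmless once both sides are readers. It uses
std::shared_mutex and made-up function names purely for illustration;
the actual patch uses llvm::sys::RWMutex:

  #include <shared_mutex>

  std::shared_mutex plan_stack_mutex; // stand-in: guards the ThreadPlanStack
  std::shared_mutex frame_list_mutex; // stand-in: guards the StackFrameList

  // Path 1: reads the plan stack, then the frame list.
  void InspectPlansThenFrames() {
    std::shared_lock plans(plan_stack_mutex);
    std::shared_lock frames(frame_list_mutex);
    // ... read-only work ...
  }

  // Path 2: takes the same two locks in the opposite order.
  void InspectFramesThenPlans() {
    std::shared_lock frames(frame_list_mutex);
    std::shared_lock plans(plan_stack_mutex);
    // ... read-only work ...
  }

  // With exclusive mutexes these two paths can deadlock: each thread
  // holds its first lock and blocks forever on the other. With shared
  // mutexes, two readers never exclude each other, so both paths
  // proceed; the inversion only bites when a writer is involved.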

This patch handles the easy part: the ThreadPlanStack mutex.

The tricky part is that these were originally recursive mutexes, and
recursive acquisition of a shared mutex is undefined behavior according
to the C++ standard. So I had to add a couple of NoLock variants to
make sure the mutex is never taken recursively. Since the only
remaining calls out from under the lock are to ThreadPlans, and
ThreadPlans don't have access to their containing ThreadPlanStack,
converting this to a non-recursive lock should be safe.
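
The NoLock split itself looks like this; a minimal sketch with a
hypothetical class, not lldb's actual API:

  #include "llvm/Support/RWMutex.h"

  class Stack {
  public:
    void DiscardAll() {
      // The public entry point takes the write lock exactly once...
      llvm::sys::ScopedWriter guard(m_mutex);
      while (m_size > 1)
        DiscardOneNoLock();
    }

  private:
    // ...and the NoLock variant assumes the caller already holds it,
    // so the (non-recursive) shared mutex is never acquired twice.
    void DiscardOneNoLock() { --m_size; }

    mutable llvm::sys::RWMutex m_mutex;
    int m_size = 1;
  };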

Added: 
    

Modified: 
    lldb/include/lldb/Target/ThreadPlanStack.h
    lldb/source/Target/ThreadPlanStack.cpp

Removed: 
    


################################################################################
diff --git a/lldb/include/lldb/Target/ThreadPlanStack.h b/lldb/include/lldb/Target/ThreadPlanStack.h
index e6a560a509261f..e0f8104de9a4d1 100644
--- a/lldb/include/lldb/Target/ThreadPlanStack.h
+++ b/lldb/include/lldb/Target/ThreadPlanStack.h
@@ -14,6 +14,8 @@
 #include <unordered_map>
 #include <vector>
 
+#include "llvm/Support/RWMutex.h"
+
 #include "lldb/Target/Target.h"
 #include "lldb/Target/Thread.h"
 #include "lldb/lldb-private-forward.h"
@@ -96,9 +98,12 @@ class ThreadPlanStack {
   void ClearThreadCache();
 
 private:
-  void PrintOneStack(Stream &s, llvm::StringRef stack_name,
-                     const PlanStack &stack, lldb::DescriptionLevel desc_level,
-                     bool include_internal) const;
+  lldb::ThreadPlanSP DiscardPlanNoLock();
+  lldb::ThreadPlanSP GetCurrentPlanNoLock() const;
+  void PrintOneStackNoLock(Stream &s, llvm::StringRef stack_name,
+                           const PlanStack &stack,
+                           lldb::DescriptionLevel desc_level,
+                           bool include_internal) const;
 
   PlanStack m_plans;           ///< The stack of plans this thread is executing.
   PlanStack m_completed_plans; ///< Plans that have been completed by this
@@ -110,7 +115,7 @@ class ThreadPlanStack {
   size_t m_completed_plan_checkpoint = 0; // Monotonically increasing token for
                                           // completed plan checkpoints.
   std::unordered_map<size_t, PlanStack> m_completed_plan_store;
-  mutable std::recursive_mutex m_stack_mutex;
+  mutable llvm::sys::RWMutex m_stack_mutex;
 };
 
 class ThreadPlanStackMap {

diff --git a/lldb/source/Target/ThreadPlanStack.cpp b/lldb/source/Target/ThreadPlanStack.cpp
index 1572931429071d..d5d600dda47a3f 100644
--- a/lldb/source/Target/ThreadPlanStack.cpp
+++ b/lldb/source/Target/ThreadPlanStack.cpp
@@ -39,21 +39,21 @@ ThreadPlanStack::ThreadPlanStack(const Thread &thread, bool make_null) {
 void ThreadPlanStack::DumpThreadPlans(Stream &s,
                                       lldb::DescriptionLevel desc_level,
                                       bool include_internal) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   s.IndentMore();
-  PrintOneStack(s, "Active plan stack", m_plans, desc_level, include_internal);
-  PrintOneStack(s, "Completed plan stack", m_completed_plans, desc_level,
-                include_internal);
-  PrintOneStack(s, "Discarded plan stack", m_discarded_plans, desc_level,
-                include_internal);
+  PrintOneStackNoLock(s, "Active plan stack", m_plans, desc_level,
+                      include_internal);
+  PrintOneStackNoLock(s, "Completed plan stack", m_completed_plans, desc_level,
+                      include_internal);
+  PrintOneStackNoLock(s, "Discarded plan stack", m_discarded_plans, desc_level,
+                      include_internal);
   s.IndentLess();
 }
 
-void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
-                                    const PlanStack &stack,
-                                    lldb::DescriptionLevel desc_level,
-                                    bool include_internal) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+void ThreadPlanStack::PrintOneStackNoLock(Stream &s, llvm::StringRef stack_name,
+                                          const PlanStack &stack,
+                                          lldb::DescriptionLevel desc_level,
+                                          bool include_internal) const {
   // If the stack is empty, just exit:
   if (stack.empty())
     return;
@@ -82,7 +82,7 @@ void ThreadPlanStack::PrintOneStack(Stream &s, llvm::StringRef stack_name,
 }
 
 size_t ThreadPlanStack::CheckpointCompletedPlans() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   m_completed_plan_checkpoint++;
   m_completed_plan_store.insert(
       std::make_pair(m_completed_plan_checkpoint, m_completed_plans));
@@ -90,7 +90,7 @@ size_t ThreadPlanStack::CheckpointCompletedPlans() {
 }
 
 void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   auto result = m_completed_plan_store.find(checkpoint);
   assert(result != m_completed_plan_store.end() &&
          "Asked for a checkpoint that didn't exist");
@@ -99,13 +99,13 @@ void ThreadPlanStack::RestoreCompletedPlanCheckpoint(size_t checkpoint) {
 }
 
 void ThreadPlanStack::DiscardCompletedPlanCheckpoint(size_t checkpoint) {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   m_completed_plan_store.erase(checkpoint);
 }
 
 void ThreadPlanStack::ThreadDestroyed(Thread *thread) {
   // Tell the plan stacks that this thread is going away:
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   for (ThreadPlanSP plan : m_plans)
     plan->ThreadDestroyed();
 
@@ -134,20 +134,22 @@ void ThreadPlanStack::PushPlan(lldb::ThreadPlanSP new_plan_sp) {
   // If the thread plan doesn't already have a tracer, give it its parent's
   // tracer:
   // The first plan has to be a base plan:
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
-  assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) &&
-         "Zeroth plan must be a base plan");
-
-  if (!new_plan_sp->GetThreadPlanTracer()) {
-    assert(!m_plans.empty());
-    new_plan_sp->SetThreadPlanTracer(m_plans.back()->GetThreadPlanTracer());
+  { // Scope for Lock - DidPush often adds plans to the stack:
+    llvm::sys::ScopedWriter guard(m_stack_mutex);
+    assert((m_plans.size() > 0 || new_plan_sp->IsBasePlan()) &&
+           "Zeroth plan must be a base plan");
+
+    if (!new_plan_sp->GetThreadPlanTracer()) {
+      assert(!m_plans.empty());
+      new_plan_sp->SetThreadPlanTracer(m_plans.back()->GetThreadPlanTracer());
+    }
+    m_plans.push_back(new_plan_sp);
   }
-  m_plans.push_back(new_plan_sp);
   new_plan_sp->DidPush();
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   assert(m_plans.size() > 1 && "Can't pop the base thread plan");
 
   // Note that moving the top element of the vector would leave it in an
@@ -161,7 +163,11 @@ lldb::ThreadPlanSP ThreadPlanStack::PopPlan() {
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
+  return DiscardPlanNoLock();
+}
+
+lldb::ThreadPlanSP ThreadPlanStack::DiscardPlanNoLock() {
   assert(m_plans.size() > 1 && "Can't discard the base thread plan");
 
   // Note that moving the top element of the vector would leave it in an
@@ -177,12 +183,12 @@ lldb::ThreadPlanSP ThreadPlanStack::DiscardPlan() {
 // If the input plan is nullptr, discard all plans.  Otherwise make sure this
 // plan is in the stack, and if so discard up to and including it.
 void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   int stack_size = m_plans.size();
 
   if (up_to_plan_ptr == nullptr) {
     for (int i = stack_size - 1; i > 0; i--)
-      DiscardPlan();
+      DiscardPlanNoLock();
     return;
   }
 
@@ -197,23 +203,23 @@ void ThreadPlanStack::DiscardPlansUpToPlan(ThreadPlan *up_to_plan_ptr) {
   if (found_it) {
     bool last_one = false;
     for (int i = stack_size - 1; i > 0 && !last_one; i--) {
-      if (GetCurrentPlan().get() == up_to_plan_ptr)
+      if (GetCurrentPlanNoLock().get() == up_to_plan_ptr)
         last_one = true;
-      DiscardPlan();
+      DiscardPlanNoLock();
     }
   }
 }
 
 void ThreadPlanStack::DiscardAllPlans() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   int stack_size = m_plans.size();
   for (int i = stack_size - 1; i > 0; i--) {
-    DiscardPlan();
+    DiscardPlanNoLock();
   }
 }
 
 void ThreadPlanStack::DiscardConsultingControllingPlans() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   while (true) {
     int controlling_plan_idx;
     bool discard = true;
@@ -234,26 +240,30 @@ void ThreadPlanStack::DiscardConsultingControllingPlans() {
 
     // First pop all the dependent plans:
     for (int i = m_plans.size() - 1; i > controlling_plan_idx; i--) {
-      DiscardPlan();
+      DiscardPlanNoLock();
     }
 
     // Now discard the controlling plan itself.
     // The bottom-most plan never gets discarded.  "OkayToDiscard" for it
     // means discard it's dependent plans, but not it...
     if (controlling_plan_idx > 0) {
-      DiscardPlan();
+      DiscardPlanNoLock();
     }
   }
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlan() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
+  return GetCurrentPlanNoLock();
+}
+
+lldb::ThreadPlanSP ThreadPlanStack::GetCurrentPlanNoLock() const {
   assert(m_plans.size() != 0 && "There will always be a base plan.");
   return m_plans.back();
 }
 
 lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};
 
@@ -271,7 +281,7 @@ lldb::ThreadPlanSP ThreadPlanStack::GetCompletedPlan(bool skip_private) const {
 
 lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
                                                    bool skip_private) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   uint32_t idx = 0;
 
   for (lldb::ThreadPlanSP plan_sp : m_plans) {
@@ -285,7 +295,7 @@ lldb::ThreadPlanSP ThreadPlanStack::GetPlanByIndex(uint32_t plan_idx,
 }
 
 lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};
 
@@ -299,7 +309,7 @@ lldb::ValueObjectSP ThreadPlanStack::GetReturnValueObject() const {
 }
 
 lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (m_completed_plans.empty())
     return {};
 
@@ -312,23 +322,23 @@ lldb::ExpressionVariableSP ThreadPlanStack::GetExpressionVariable() const {
   return {};
 }
 bool ThreadPlanStack::AnyPlans() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   // There is always a base plan...
   return m_plans.size() > 1;
 }
 
 bool ThreadPlanStack::AnyCompletedPlans() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   return !m_completed_plans.empty();
 }
 
 bool ThreadPlanStack::AnyDiscardedPlans() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   return !m_discarded_plans.empty();
 }
 
 bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   for (auto plan : m_completed_plans) {
     if (plan.get() == in_plan)
       return true;
@@ -337,7 +347,7 @@ bool ThreadPlanStack::IsPlanDone(ThreadPlan *in_plan) const {
 }
 
 bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   for (auto plan : m_discarded_plans) {
     if (plan.get() == in_plan)
       return true;
@@ -346,7 +356,7 @@ bool ThreadPlanStack::WasPlanDiscarded(ThreadPlan *in_plan) const {
 }
 
 ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   if (current_plan == nullptr)
     return nullptr;
 
@@ -361,7 +371,7 @@ ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
   // If this is the first completed plan, the previous one is the
   // bottom of the regular plan stack.
   if (stack_size > 0 && m_completed_plans[0].get() == current_plan) {
-    return GetCurrentPlan().get();
+    return GetCurrentPlanNoLock().get();
   }
 
   // Otherwise look for it in the regular plans.
@@ -374,7 +384,7 @@ ThreadPlan *ThreadPlanStack::GetPreviousPlan(ThreadPlan *current_plan) const {
 }
 
 ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   int stack_size = m_plans.size();
 
   for (int i = stack_size - 1; i > 0; i--) {
@@ -385,13 +395,13 @@ ThreadPlan *ThreadPlanStack::GetInnermostExpression() const {
 }
 
 void ThreadPlanStack::ClearThreadCache() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedReader guard(m_stack_mutex);
   for (lldb::ThreadPlanSP thread_plan_sp : m_plans)
     thread_plan_sp->ClearThreadCache();
 }
 
 void ThreadPlanStack::WillResume() {
-  std::lock_guard<std::recursive_mutex> guard(m_stack_mutex);
+  llvm::sys::ScopedWriter guard(m_stack_mutex);
   m_completed_plans.clear();
   m_discarded_plans.clear();
 }


        

