[Lldb-commits] [lldb] Private process events were being delivered to the secondary listener (PR #98571)

via lldb-commits lldb-commits at lists.llvm.org
Thu Jul 11 17:57:27 PDT 2024


https://github.com/jimingham created https://github.com/llvm/llvm-project/pull/98571

This fixes a bug where Process events were being delivered to secondary listeners while the private state thread listener was still processing the event.  That meant a secondary listener could get an event before the primary listener did, so the process state was not yet correct when the secondary listener received the event.  It also meant the secondary listener saw more events than the primary, since not all events get forwarded from the private to the public Process listener.
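
For illustration, this is roughly the setup that exposes the bug: a primary listener registered with the launch, plus a second "shadow" listener signed up directly on the process broadcaster.  The snippet below is only a sketch, not code from this patch; the target name and timeout are made up:

    # Sketch of a primary plus secondary ("shadow") listener setup.
    # Illustrative only; assumes an existing "a.out" target and async mode.
    import lldb

    dbg = lldb.SBDebugger.Create()
    dbg.SetAsync(True)
    target = dbg.CreateTarget("a.out")

    launch_info = lldb.SBLaunchInfo(None)
    primary = lldb.SBListener("primary")
    launch_info.SetListener(primary)

    error = lldb.SBError()
    process = target.Launch(launch_info, error)

    # The secondary listener signs up on the process broadcaster for state
    # changes.  Before this fix it could receive a state-changed event while
    # the private state thread was still processing it, i.e. before "primary".
    shadow = lldb.SBListener("shadow")
    process.GetBroadcaster().AddListener(
        shadow, lldb.SBProcess.eBroadcastBitStateChanged)

    event = lldb.SBEvent()
    if primary.WaitForEvent(5, event):
        state = lldb.SBProcess.GetStateFromEvent(event)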

This bug became much more evident when a stop hook did some work, since that delays the primary listener's event delivery.  So I also added a stop hook to the test, and put in a little delay as well.
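
The stop_hook.py that the test now imports is not part of this diff.  Roughly, it is a scripted stop hook whose class-level counters the test reads back.  The sketch below only shows the shape: the class name and the counter/non_stops attributes match what the test queries, but everything else, including the delay, is illustrative and may differ from the real file:

    # Illustrative sketch of a scripted stop hook of the kind the test loads.
    import time
    import lldb

    class StopHook:
        # Class-level counters, keyed by the "instance" string passed with
        # "-k instance -v <key>" when the stop hook is added.
        counter = {}
        non_stops = {}

        def __init__(self, target, extra_args, internal_dict):
            self.instance = extra_args.GetValueForKey("instance").GetStringValue(100)
            StopHook.counter.setdefault(self.instance, 0)
            StopHook.non_stops.setdefault(self.instance, 0)

        def handle_stop(self, exe_ctx, stream):
            # Count genuine stops; record anything else as a "non stop".
            if exe_ctx.GetProcess().GetState() == lldb.eStateStopped:
                StopHook.counter[self.instance] += 1
            else:
                StopHook.non_stops[self.instance] += 1
            # A small delay widens the window in which a secondary listener
            # could (before this fix) see the event ahead of the primary one.
            time.sleep(0.5)
            return True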

From fb563e516f3a73d508ea7b3a61df4f1bab2f33a6 Mon Sep 17 00:00:00 2001
From: Jim Ingham <jingham at apple.com>
Date: Thu, 11 Jul 2024 17:50:08 -0700
Subject: [PATCH] Fix a bug where process events were being delivered to the
 secondary Listener when the Private event queue was processing the event. 
 That meant the secondary listener could get an event before the Primary
 listener did.  That in turn meant the state when the secondary listener got
 the event wasn't right yet.  Plus it meant that the secondary listener saw
 more events than the primary (not all events get forwarded from the private
 to the public Process listener).

This bug became much more evident when we had a stop hook that did some work,
since that delays the Primary listener event delivery.  So I also added a stop-hook
to the test, and put a little delay in as well.
---
 lldb/include/lldb/Target/Process.h           |  3 ++
 lldb/include/lldb/Utility/Event.h            | 13 +++++
 lldb/source/Target/Process.cpp               | 14 ++++++
 lldb/source/Utility/Event.cpp                | 16 ++++--
 lldb/test/API/python_api/event/TestEvents.py | 53 +++++++++++++++++---
 5 files changed, 86 insertions(+), 13 deletions(-)

diff --git a/lldb/include/lldb/Target/Process.h b/lldb/include/lldb/Target/Process.h
index ceaf547ebddaf..37fc192b4469c 100644
--- a/lldb/include/lldb/Target/Process.h
+++ b/lldb/include/lldb/Target/Process.h
@@ -465,6 +465,9 @@ class Process : public std::enable_shared_from_this<Process>,
     static bool SetUpdateStateOnRemoval(Event *event_ptr);
 
   private:
+
+    bool ForwardEventToPendingListeners(Event *event_ptr) override;
+
     void SetUpdateStateOnRemoval() { m_update_state++; }
 
     void SetRestarted(bool new_value) { m_restarted = new_value; }
diff --git a/lldb/include/lldb/Utility/Event.h b/lldb/include/lldb/Utility/Event.h
index 461d711b8c3f2..5bea328d1faa9 100644
--- a/lldb/include/lldb/Utility/Event.h
+++ b/lldb/include/lldb/Utility/Event.h
@@ -48,6 +48,19 @@ class EventData {
   virtual void Dump(Stream *s) const;
 
 private:
+  /// This will be queried for a Broadcaster with a primary and some secondary
+  /// listeners after the primary listener has pulled the event from the event
+  /// queue and run its DoOnRemoval, right before the event is delivered.
+  /// If it returns true, the event will also be forwarded to the secondary
+  /// listeners; if it returns false, event propagation stops at the primary
+  /// listener.  Some broadcasters (particularly the Process broadcaster) fetch
+  /// events on a private Listener, and then forward them to the public
+  /// Listeners after some processing.  The Process broadcaster does not want
+  /// to forward to the secondary listeners at the private processing stage.
+  virtual bool ForwardEventToPendingListeners(Event *event_ptr) {
+    return true;
+  }
+
   virtual void DoOnRemoval(Event *event_ptr) {}
 
   EventData(const EventData &) = delete;
diff --git a/lldb/source/Target/Process.cpp b/lldb/source/Target/Process.cpp
index 6fac0df1d7a66..a43684c999641 100644
--- a/lldb/source/Target/Process.cpp
+++ b/lldb/source/Target/Process.cpp
@@ -4248,7 +4248,21 @@ bool Process::ProcessEventData::ShouldStop(Event *event_ptr,
   return still_should_stop;
 }
 
+bool Process::ProcessEventData::ForwardEventToPendingListeners(Event *event_ptr) {
+  // STDIO and the other async event notifications should always be forwarded.
+  if (event_ptr->GetType() != Process::eBroadcastBitStateChanged)
+    return true;
+
+  // For state changed events, if the update state is zero, we are handling
+  // this on the private state thread.  We should wait for the public event.
+  return m_update_state == 1;
+}
+
 void Process::ProcessEventData::DoOnRemoval(Event *event_ptr) {
+  // We only have work to do for state changed events:
+  if (event_ptr->GetType() != Process::eBroadcastBitStateChanged)
+    return;
+
   ProcessSP process_sp(m_process_wp.lock());
 
   if (!process_sp)
diff --git a/lldb/source/Utility/Event.cpp b/lldb/source/Utility/Event.cpp
index 863167e56bce6..5f431c0a6dd89 100644
--- a/lldb/source/Utility/Event.cpp
+++ b/lldb/source/Utility/Event.cpp
@@ -83,14 +83,20 @@ void Event::Dump(Stream *s) const {
 void Event::DoOnRemoval() {
   std::lock_guard<std::mutex> guard(m_listeners_mutex);
 
-  if (m_data_sp)
-    m_data_sp->DoOnRemoval(this);
+  if (!m_data_sp)
+    return;
+
+  m_data_sp->DoOnRemoval(this);
+
   // Now that the event has been handled by the primary event Listener, forward
   // it to the other Listeners.
+
   EventSP me_sp = shared_from_this();
-  for (auto listener_sp : m_pending_listeners)
-    listener_sp->AddEvent(me_sp);
-  m_pending_listeners.clear();
+  if (m_data_sp->ForwardEventToPendingListeners(this)) {
+    for (auto listener_sp : m_pending_listeners)
+      listener_sp->AddEvent(me_sp);
+    m_pending_listeners.clear();
+  }
 }
 
 #pragma mark -
diff --git a/lldb/test/API/python_api/event/TestEvents.py b/lldb/test/API/python_api/event/TestEvents.py
index d8d3dd2d2b01b..ba9de897cf562 100644
--- a/lldb/test/API/python_api/event/TestEvents.py
+++ b/lldb/test/API/python_api/event/TestEvents.py
@@ -7,7 +7,7 @@
 from lldbsuite.test.decorators import *
 from lldbsuite.test.lldbtest import *
 from lldbsuite.test import lldbutil
-
+import random
 
 @skipIfLinux  # llvm.org/pr25924, sometimes generating SIGSEGV
 class EventAPITestCase(TestBase):
@@ -20,6 +20,7 @@ def setUp(self):
         self.line = line_number(
             "main.c", '// Find the line number of function "c" here.'
         )
+        random.seed()
 
     @expectedFailureAll(
         oslist=["linux"], bugnumber="llvm.org/pr23730 Flaky, fails ~1/10 cases"
@@ -318,6 +319,7 @@ def wait_for_next_event(self, expected_state, test_shadow=False):
         """Wait for an event from self.primary & self.shadow listener.
         If test_shadow is true, we also check that the shadow listener only
         receives events AFTER the primary listener does."""
+        import stop_hook
         # Waiting on the shadow listener shouldn't have events yet because
         # we haven't fetched them for the primary listener yet:
         event = lldb.SBEvent()
@@ -328,13 +330,25 @@ def wait_for_next_event(self, expected_state, test_shadow=False):
 
         # But there should be an event for the primary listener:
         success = self.primary_listener.WaitForEvent(5, event)
+
         self.assertTrue(success, "Primary listener got the event")
 
         state = lldb.SBProcess.GetStateFromEvent(event)
+        primary_event_type = event.GetType()
         restart = False
         if state == lldb.eStateStopped:
             restart = lldb.SBProcess.GetRestartedFromEvent(event)
-
+            # This counter tracks the stop hooks, which don't get run
+            # for auto-restarting stops.
+            if not restart:
+                self.stop_counter += 1
+                self.assertEqual(
+                    stop_hook.StopHook.counter[self.instance],
+                    self.stop_counter,
+                    "matching stop hook"
+                )
+
+
         if expected_state is not None:
             self.assertEqual(
                 state, expected_state, "Primary thread got the correct event"
@@ -344,15 +358,22 @@ def wait_for_next_event(self, expected_state, test_shadow=False):
         # listener:
         success = self.shadow_listener.WaitForEvent(5, event)
         self.assertTrue(success, "Shadow listener got event too")
+        shadow_event_type = event.GetType()
         self.assertEqual(
-            state, lldb.SBProcess.GetStateFromEvent(event), "It was the same event"
+            primary_event_type,
+            shadow_event_type,
+            "It was the same event type"
+        )
+        self.assertEqual(
+            state,
+            lldb.SBProcess.GetStateFromEvent(event),
+            "It was the same state"
         )
         self.assertEqual(
             restart,
             lldb.SBProcess.GetRestartedFromEvent(event),
             "It was the same restarted",
         )
-
         return state, restart
 
     @expectedFlakeyLinux("llvm.org/pr23730")  # Flaky, fails ~1/100 cases
@@ -386,6 +407,17 @@ def test_shadow_listener(self):
         )
         self.dbg.SetAsync(True)
 
+        # Now make our stop hook - we want to ensure it stays up to date with
+        # the events.  We can't get our hands on the stop-hook instance directly,
+        # so we'll pass in an instance key, and use that to retrieve the data from
+        # this instance of the stop hook:
+        self.instance = f"Key{random.randint(0,10000)}"
+        stop_hook_path = os.path.join(self.getSourceDir(), "stop_hook.py")
+        self.runCmd(f"command script import {stop_hook_path}")
+        import stop_hook
+        self.runCmd(f"target stop-hook add -P stop_hook.StopHook -k instance -v {self.instance}")
+        self.stop_counter = 0
+
         self.process = target.Launch(launch_info, error)
         self.assertSuccess(error, "Process launched successfully")
 
@@ -395,12 +427,13 @@ def test_shadow_listener(self):
         # Events in the launch sequence might be platform dependent, so don't
         # expect any particular event till we get the stopped:
         state = lldb.eStateInvalid
+
         while state != lldb.eStateStopped:
             state, restart = self.wait_for_next_event(None, False)
-
+
         # Okay, we're now at a good stop, so try a next:
         self.cur_thread = self.process.threads[0]
-
+
         # Make sure we're at our expected breakpoint:
         self.assertTrue(self.cur_thread.IsValid(), "Got a zeroth thread")
         self.assertEqual(self.cur_thread.stop_reason, lldb.eStopReasonBreakpoint)
@@ -412,8 +445,6 @@ def test_shadow_listener(self):
             self.cur_thread.GetStopReasonDataAtIndex(0),
             "Hit the right breakpoint",
         )
-        # Disable the first breakpoint so it doesn't get in the way...
-        bkpt1.SetEnabled(False)
 
         self.cur_thread.StepOver()
         # We'll run the test for "shadow listener blocked by primary listener
@@ -443,6 +474,7 @@ def test_shadow_listener(self):
         # Put in a counter to make sure we don't spin forever if there is some
         # error in the logic.
         counter = 0
+        run_wo_stop = False
         while state != lldb.eStateExited:
             counter += 1
             self.assertLess(
@@ -450,4 +482,9 @@ def test_shadow_listener(self):
             )
             if state == lldb.eStateStopped and not restarted:
                 self.process.Continue()
+
             state, restarted = self.wait_for_next_event(None, False)
+
+        # Now make sure we agree with the stop hook counter:
+        self.assertEqual(self.stop_counter, stop_hook.StopHook.counter[self.instance])
+        self.assertEqual(stop_hook.StopHook.non_stops[self.instance], 0, "No non stops")



More information about the lldb-commits mailing list