[Lldb-commits] [lldb] 434905b - Run all threads when extending a next range over a call.
Jim Ingham via lldb-commits
lldb-commits at lists.llvm.org
Mon Dec 16 17:48:22 PST 2019
Author: Jim Ingham
Date: 2019-12-16T17:45:21-08:00
New Revision: 434905b97d961531286d4b49c7ee1969f7cbea0e
URL: https://github.com/llvm/llvm-project/commit/434905b97d961531286d4b49c7ee1969f7cbea0e
DIFF: https://github.com/llvm/llvm-project/commit/434905b97d961531286d4b49c7ee1969f7cbea0e.diff
LOG: Run all threads when extending a next range over a call.
If you don't do this, you end up running arbitrary code with
only one thread allowed to run, which can cause deadlocks.
<rdar://problem/56422478>
Differential Revision: https://reviews.llvm.org/D71440
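In outline: GetIndexOfNextBranchInstruction grows an out-parameter that
reports whether any calls were skipped while extending the next-branch
range, and ThreadPlanStepRange::StopOthers consults it when deciding
whether the other threads may run. A condensed sketch of the flow,
paraphrasing the diff below rather than quoting it verbatim:

    // In ThreadPlanStepRange::SetNextBranchBreakpoint(), for the
    // step-over case:
    uint32_t branch_index = instructions->GetIndexOfNextBranchInstruction(
        pc_index, target, /*ignore_calls=*/true, &m_found_calls);

    // In ThreadPlanStepRange::StopOthers(): if the next-branch range was
    // extended over any calls, let all threads run while stepping over them.
    if (m_stop_others == lldb::eOnlyDuringStepping)
      return !m_found_calls;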
Added:
lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.cpp
lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/Makefile
lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/TestStepOverDoesntBlock.py
lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/locking.cpp
Modified:
lldb/include/lldb/Core/Disassembler.h
lldb/include/lldb/Target/ThreadPlanStepRange.h
lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/Makefile
lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/TestExprDoesntBlock.py
lldb/source/Core/Disassembler.cpp
lldb/source/Target/Process.cpp
lldb/source/Target/ThreadPlanStepRange.cpp
Removed:
lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.c
################################################################################
diff --git a/lldb/include/lldb/Core/Disassembler.h b/lldb/include/lldb/Core/Disassembler.h
index ba9ca87832f6..7ece0eeb708c 100644
--- a/lldb/include/lldb/Core/Disassembler.h
+++ b/lldb/include/lldb/Core/Disassembler.h
@@ -287,6 +287,10 @@ class InstructionList {
/// a function call (a branch that calls and returns to the next
/// instruction). If false, find the instruction index of any
/// branch in the list.
+ ///
+ /// @param[out] found_calls
+ /// If non-null, this will be set to true if any calls were found in
+ /// extending the range.
///
/// @return
/// The instruction index of the first branch that is at or past
@@ -295,7 +299,8 @@ class InstructionList {
//------------------------------------------------------------------
uint32_t GetIndexOfNextBranchInstruction(uint32_t start,
Target &target,
- bool ignore_calls) const;
+ bool ignore_calls,
+ bool *found_calls) const;
uint32_t GetIndexOfInstructionAtLoadAddress(lldb::addr_t load_addr,
Target &target);
diff --git a/lldb/include/lldb/Target/ThreadPlanStepRange.h b/lldb/include/lldb/Target/ThreadPlanStepRange.h
index 93d54ad7dfd5..28209623a1e1 100644
--- a/lldb/include/lldb/Target/ThreadPlanStepRange.h
+++ b/lldb/include/lldb/Target/ThreadPlanStepRange.h
@@ -76,6 +76,12 @@ class ThreadPlanStepRange : public ThreadPlan {
lldb::BreakpointSP m_next_branch_bp_sp;
bool m_use_fast_step;
bool m_given_ranges_only;
+ bool m_found_calls = false; // When we set the next branch breakpoint for
+                             // step over, we now extend it past call insns
+                             // that return directly. But if we do that, we
+                             // need to run all threads, or we might cause
+                             // deadlocks. This tells us whether we found any
+                             // calls while setting the next branch breakpoint.
private:
std::vector<lldb::DisassemblerSP> m_instruction_ranges;
diff --git a/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/Makefile b/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/Makefile
index f63adb4f5d2a..ee0d4690d834 100644
--- a/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/Makefile
+++ b/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/Makefile
@@ -1,4 +1,4 @@
-C_SOURCES := locking.c
+CXX_SOURCES := locking.cpp
ENABLE_THREADS := YES
include Makefile.rules
diff --git a/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/TestExprDoesntBlock.py b/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/TestExprDoesntBlock.py
index 5b5042b63e4b..d7d963390b05 100644
--- a/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/TestExprDoesntBlock.py
+++ b/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/TestExprDoesntBlock.py
@@ -16,9 +16,6 @@ class ExprDoesntDeadlockTestCase(TestBase):
mydir = TestBase.compute_mydir(__file__)
@expectedFailureAll(oslist=['freebsd'], bugnumber='llvm.org/pr17946')
- @expectedFailureAll(
- oslist=["windows"],
- bugnumber="Windows doesn't have pthreads, test needs to be ported")
@add_test_categories(["basic_process"])
def test_with_run_command(self):
"""Test that expr will time out and allow other threads to run if it blocks."""
@@ -32,7 +29,7 @@ def test_with_run_command(self):
# Now create a breakpoint at source line before call_me_to_get_lock
# gets called.
- main_file_spec = lldb.SBFileSpec("locking.c")
+ main_file_spec = lldb.SBFileSpec("locking.cpp")
breakpoint = target.BreakpointCreateBySourceRegex(
'Break here', main_file_spec)
if self.TraceOn():
@@ -55,6 +52,6 @@ def test_with_run_command(self):
frame0 = thread.GetFrameAtIndex(0)
- var = frame0.EvaluateExpression("call_me_to_get_lock()")
+ var = frame0.EvaluateExpression("call_me_to_get_lock(get_int())")
self.assertTrue(var.IsValid())
- self.assertTrue(var.GetValueAsSigned(0) == 567)
+ self.assertEqual(var.GetValueAsSigned(0), 567)
diff --git a/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.c b/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.c
deleted file mode 100644
index fae9979611d5..000000000000
--- a/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.c
+++ /dev/null
@@ -1,80 +0,0 @@
-#include <pthread.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <stdio.h>
-
-pthread_mutex_t contended_mutex = PTHREAD_MUTEX_INITIALIZER;
-
-pthread_mutex_t control_mutex = PTHREAD_MUTEX_INITIALIZER;
-pthread_cond_t control_condition;
-
-pthread_mutex_t thread_started_mutex = PTHREAD_MUTEX_INITIALIZER;
-pthread_cond_t thread_started_condition;
-
-// This function runs in a thread. The locking dance is to make sure that
-// by the time the main thread reaches the pthread_join below, this thread
-// has for sure acquired the contended_mutex. So then the call_me_to_get_lock
-// function will block trying to get the mutex, and only succeed once it
-// signals this thread, then lets it run to wake up from the cond_wait and
-// release the mutex.
-
-void *
-lock_acquirer_1 (void *input)
-{
- pthread_mutex_lock (&contended_mutex);
-
- // Grab this mutex, that will ensure that the main thread
- // is in its cond_wait for it (since that's when it drops the mutex.
-
- pthread_mutex_lock (&thread_started_mutex);
- pthread_mutex_unlock(&thread_started_mutex);
-
- // Now signal the main thread that it can continue, we have the contended lock
- // so the call to call_me_to_get_lock won't make any progress till this
- // thread gets a chance to run.
-
- pthread_mutex_lock (&control_mutex);
-
- pthread_cond_signal (&thread_started_condition);
-
- pthread_cond_wait (&control_condition, &control_mutex);
-
- pthread_mutex_unlock (&contended_mutex);
- return NULL;
-}
-
-int
-call_me_to_get_lock ()
-{
- pthread_cond_signal (&control_condition);
- pthread_mutex_lock (&contended_mutex);
- return 567;
-}
-
-int main ()
-{
- pthread_t thread_1;
-
- pthread_cond_init (&control_condition, NULL);
- pthread_cond_init (&thread_started_condition, NULL);
-
- pthread_mutex_lock (&thread_started_mutex);
-
- pthread_create (&thread_1, NULL, lock_acquirer_1, NULL);
-
- pthread_cond_wait (&thread_started_condition, &thread_started_mutex);
-
- pthread_mutex_lock (&control_mutex);
- pthread_mutex_unlock (&control_mutex);
-
- // Break here. At this point the other thread will have the contended_mutex,
- // and be sitting in its cond_wait for the control condition. So there is
- // no way that our by-hand calling of call_me_to_get_lock will proceed
- // without running the first thread at least somewhat.
-
- call_me_to_get_lock();
- pthread_join (thread_1, NULL);
-
- return 0;
-
-}
diff --git a/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.cpp b/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.cpp
new file mode 100644
index 000000000000..fab3aa8c5635
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/commands/expression/no-deadlock/locking.cpp
@@ -0,0 +1,77 @@
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+std::mutex contended_mutex;
+
+std::mutex control_mutex;
+std::condition_variable control_condition;
+
+std::mutex thread_started_mutex;
+std::condition_variable thread_started_condition;
+
+// This function runs in a thread. The locking dance is to make sure that
+// by the time the main thread reaches the join below, this thread has for
+// sure acquired the contended_mutex. So the call_me_to_get_lock function
+// will block trying to get the mutex, and only succeed once it signals
+// this thread, then lets it run to wake up from the cond_wait and
+// release the mutex.
+
+void
+lock_acquirer_1 (void)
+{
+ std::unique_lock<std::mutex> contended_lock(contended_mutex);
+
+ // Grab this mutex; that ensures that the main thread is in its
+ // cond_wait for it (since that's when it drops the mutex).
+
+ thread_started_mutex.lock();
+ thread_started_mutex.unlock();
+
+ // Now signal the main thread that it can continue; we have the contended
+ // lock, so the call to call_me_to_get_lock won't make any progress until
+ // this thread gets a chance to run.
+
+ std::unique_lock<std::mutex> control_lock(control_mutex);
+
+ thread_started_condition.notify_all();
+
+ control_condition.wait(control_lock);
+
+}
+
+int
+call_me_to_get_lock (int ret_val)
+{
+ control_condition.notify_all();
+ contended_mutex.lock();
+ return ret_val;
+}
+
+int
+get_int() {
+ return 567;
+}
+
+int main ()
+{
+ std::unique_lock<std::mutex> thread_started_lock(thread_started_mutex);
+
+ std::thread thread_1(lock_acquirer_1);
+
+ thread_started_condition.wait(thread_started_lock);
+
+ control_mutex.lock();
+ control_mutex.unlock();
+
+ // Break here. At this point the other thread will have the contended_mutex,
+ // and be sitting in its cond_wait for the control condition. So there is
+ // no way that our by-hand calling of call_me_to_get_lock will proceed
+ // without running the first thread at least somewhat.
+
+ int result = call_me_to_get_lock(get_int());
+ thread_1.join();
+
+ return 0;
+
+}
diff --git a/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/Makefile b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/Makefile
new file mode 100644
index 000000000000..ee0d4690d834
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/Makefile
@@ -0,0 +1,4 @@
+CXX_SOURCES := locking.cpp
+ENABLE_THREADS := YES
+
+include Makefile.rules
diff --git a/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/TestStepOverDoesntBlock.py b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/TestStepOverDoesntBlock.py
new file mode 100644
index 000000000000..988d90a7bb37
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/TestStepOverDoesntBlock.py
@@ -0,0 +1,30 @@
+"""
+Test that step over will let other threads run when necessary
+"""
+
+from __future__ import print_function
+
+
+import lldb
+from lldbsuite.test.decorators import *
+from lldbsuite.test.lldbtest import *
+from lldbsuite.test import lldbutil
+
+
+class StepOverDoesntDeadlockTestCase(TestBase):
+
+ mydir = TestBase.compute_mydir(__file__)
+
+ def test_step_over(self):
+ """Test that when step over steps over a function it lets other threads run."""
+ self.build()
+ (target, process, thread, bkpt) = lldbutil.run_to_source_breakpoint(self,
+ "without running the first thread at least somewhat",
+ lldb.SBFileSpec("locking.cpp"))
+ # This is just testing that the step over actually completes.
+ # If the test fails, this step never returns, so failure is
+ # really signaled by the test timing out.
+
+ thread.StepOver()
+ state = process.GetState()
+ self.assertEqual(state, lldb.eStateStopped)
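For context: SBThread.StepOver's optional run-mode argument defaults to
eOnlyDuringStepping, which is exactly the mode the new StopOthers logic
refines, so the plain StepOver() call above exercises the fixed path.
Assuming the stock SB API, the explicit form would be:

    thread.StepOver(lldb.eOnlyDuringStepping)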
diff --git a/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/locking.cpp b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/locking.cpp
new file mode 100644
index 000000000000..fab3aa8c5635
--- /dev/null
+++ b/lldb/packages/Python/lldbsuite/test/lang/c/step_over_no_deadlock/locking.cpp
@@ -0,0 +1,77 @@
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+
+std::mutex contended_mutex;
+
+std::mutex control_mutex;
+std::condition_variable control_condition;
+
+std::mutex thread_started_mutex;
+std::condition_variable thread_started_condition;
+
+// This function runs in a thread. The locking dance is to make sure that
+// by the time the main thread reaches the join below, this thread has for
+// sure acquired the contended_mutex. So the call_me_to_get_lock function
+// will block trying to get the mutex, and only succeed once it signals
+// this thread, then lets it run to wake up from the cond_wait and
+// release the mutex.
+
+void
+lock_acquirer_1 (void)
+{
+ std::unique_lock<std::mutex> contended_lock(contended_mutex);
+
+ // Grab this mutex; that ensures that the main thread is in its
+ // cond_wait for it (since that's when it drops the mutex).
+
+ thread_started_mutex.lock();
+ thread_started_mutex.unlock();
+
+ // Now signal the main thread that it can continue; we have the contended
+ // lock, so the call to call_me_to_get_lock won't make any progress until
+ // this thread gets a chance to run.
+
+ std::unique_lock<std::mutex> control_lock(control_mutex);
+
+ thread_started_condition.notify_all();
+
+ control_condition.wait(control_lock);
+
+}
+
+int
+call_me_to_get_lock (int ret_val)
+{
+ control_condition.notify_all();
+ contended_mutex.lock();
+ return ret_val;
+}
+
+int
+get_int() {
+ return 567;
+}
+
+int main ()
+{
+ std::unique_lock<std::mutex> thread_started_lock(thread_started_mutex);
+
+ std::thread thread_1(lock_acquirer_1);
+
+ thread_started_condition.wait(thread_started_lock);
+
+ control_mutex.lock();
+ control_mutex.unlock();
+
+ // Break here. At this point the other thread will have the contended_mutex,
+ // and be sitting in its cond_wait for the control condition. So there is
+ // no way that our by-hand calling of call_me_to_get_lock will proceed
+ // without running the first thread at least somewhat.
+
+ int result = call_me_to_get_lock(get_int());
+ thread_1.join();
+
+ return 0;
+
+}
diff --git a/lldb/source/Core/Disassembler.cpp b/lldb/source/Core/Disassembler.cpp
index 89ae25cbad64..33172cc8868e 100644
--- a/lldb/source/Core/Disassembler.cpp
+++ b/lldb/source/Core/Disassembler.cpp
@@ -1101,15 +1101,22 @@ void InstructionList::Append(lldb::InstructionSP &inst_sp) {
uint32_t
InstructionList::GetIndexOfNextBranchInstruction(uint32_t start,
Target &target,
- bool ignore_calls) const {
+ bool ignore_calls,
+ bool *found_calls) const {
size_t num_instructions = m_instructions.size();
uint32_t next_branch = UINT32_MAX;
size_t i;
+
+ if (found_calls)
+ *found_calls = false;
for (i = start; i < num_instructions; i++) {
if (m_instructions[i]->DoesBranch()) {
- if (ignore_calls && m_instructions[i]->IsCall())
+ if (ignore_calls && m_instructions[i]->IsCall()) {
+ if (found_calls)
+ *found_calls = true;
continue;
+ }
next_branch = i;
break;
}
diff --git a/lldb/source/Target/Process.cpp b/lldb/source/Target/Process.cpp
index a731a353c1bc..a8fb32dafa89 100644
--- a/lldb/source/Target/Process.cpp
+++ b/lldb/source/Target/Process.cpp
@@ -5800,7 +5800,8 @@ Process::AdvanceAddressToNextBranchInstruction(Address default_stop_addr,
uint32_t branch_index =
insn_list->GetIndexOfNextBranchInstruction(insn_offset, target,
- false /* ignore_calls*/);
+ false /* ignore_calls*/,
+ nullptr);
if (branch_index == UINT32_MAX) {
return retval;
}
diff --git a/lldb/source/Target/ThreadPlanStepRange.cpp b/lldb/source/Target/ThreadPlanStepRange.cpp
index 27513a34eadb..a22337deaed5 100644
--- a/lldb/source/Target/ThreadPlanStepRange.cpp
+++ b/lldb/source/Target/ThreadPlanStepRange.cpp
@@ -238,8 +238,19 @@ lldb::FrameComparison ThreadPlanStepRange::CompareCurrentFrameToStartFrame() {
}
bool ThreadPlanStepRange::StopOthers() {
- return (m_stop_others == lldb::eOnlyThisThread ||
- m_stop_others == lldb::eOnlyDuringStepping);
+ switch (m_stop_others) {
+ case lldb::eOnlyThisThread:
+ return true;
+ case lldb::eOnlyDuringStepping:
+ // If there is a call in the range of the next branch breakpoint,
+ // then we should always run all threads, since a call can execute
+ // arbitrary code which might for instance take a lock that's held
+ // by another thread.
+ return !m_found_calls;
+ case lldb::eAllThreads:
+ return false;
+ }
+ llvm_unreachable("Unhandled run mode!");
}
InstructionList *ThreadPlanStepRange::GetInstructionsForAddress(
@@ -292,6 +302,7 @@ void ThreadPlanStepRange::ClearNextBranchBreakpoint() {
GetTarget().RemoveBreakpointByID(m_next_branch_bp_sp->GetID());
m_next_branch_bp_sp.reset();
m_could_not_resolve_hw_bp = false;
+ m_found_calls = false;
}
}
@@ -305,6 +316,9 @@ bool ThreadPlanStepRange::SetNextBranchBreakpoint() {
if (!m_use_fast_step)
return false;
+ // Clear m_found_calls; we'll rediscover it for this range.
+ m_found_calls = false;
+
lldb::addr_t cur_addr = GetThread().GetRegisterContext()->GetPC();
// Find the current address in our address ranges, and fetch the disassembly
// if we haven't already:
@@ -317,9 +331,10 @@
else {
Target &target = GetThread().GetProcess()->GetTarget();
const bool ignore_calls = GetKind() == eKindStepOverRange;
uint32_t branch_index =
instructions->GetIndexOfNextBranchInstruction(pc_index, target,
- ignore_calls);
+ ignore_calls,
+ &m_found_calls);
Address run_to_address;