[Lldb-commits] [lldb] [RFC][lldb-dap] Always stop on entry for attaching (PR #134339)
via lldb-commits
lldb-commits at lists.llvm.org
Thu Apr 3 19:52:26 PDT 2025
llvmbot wrote:
@llvm/pr-subscribers-lldb
Author: Wanyi (kusmour)
Recently, while debugging a program with thousands of threads in VS Code, lldb-dap would hang on a `threads` request sent right after receiving the `configurationDone` response. Soon after, it would end the debug session with the following error:
```
Process <pid> exited with status = -1 (0xffffffff) lost connection
```
This is because LLDB is still in the middle of resuming all the threads, and servicing a `threads` request requires stopping the process. From the gdb-remote log, the process ended up in `lldb::StateType::eStateInvalid` and exited with status -1.
I don't think it's reasonable to allow getting threads from a running process. The alternative would be to reject the `threads` request if the process is not stopped, as sketched below.
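For illustration only, here is a minimal sketch of that alternative using the public SB API. The helper name `RejectThreadsIfRunning` and the error-string return are assumptions made for this example, not part of this patch:
```cpp
// Hypothetical sketch: reject a `threads` request when the process is not
// stopped, instead of (or in addition to) forcing stop-on-entry.
#include "lldb/API/SBProcess.h"

#include <optional>
#include <string>

// Returns an error message when the request should be rejected, or
// std::nullopt when it is safe to enumerate threads.
std::optional<std::string> RejectThreadsIfRunning(lldb::SBProcess &process) {
  // SBProcess::GetState() reports the public run state of the process; only
  // a stopped process can report a consistent thread list.
  if (process.GetState() != lldb::eStateStopped)
    return std::string(
        "'threads' request received while the process is running");
  return std::nullopt;
}
```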
---
Full diff: https://github.com/llvm/llvm-project/pull/134339.diff
2 Files Affected:
- (added) lldb/test/API/tools/lldb-dap/attach/multithreading.cpp (+105)
- (modified) lldb/tools/lldb-dap/Handler/AttachRequestHandler.cpp (+5-3)
``````````diff
diff --git a/lldb/test/API/tools/lldb-dap/attach/multithreading.cpp b/lldb/test/API/tools/lldb-dap/attach/multithreading.cpp
new file mode 100644
index 0000000000000..0e9c340916ece
--- /dev/null
+++ b/lldb/test/API/tools/lldb-dap/attach/multithreading.cpp
@@ -0,0 +1,105 @@
+#include <chrono>
+#include <condition_variable>
+#include <functional>
+#include <iostream>
+#include <mutex>
+#include <queue>
+#include <stdexcept>
+#include <thread>
+#include <vector>
+
+int plus_one(int n) {
+  std::cout << "In plus_one" << std::endl;
+  return n + 1;
+}
+
+class ThreadPool {
+ public:
+  ThreadPool(size_t num_threads = std::thread::hardware_concurrency()) {
+    for (size_t i = 0; i < num_threads; ++i) {
+      threads.emplace_back(std::thread(&ThreadPool::ThreadLoop, this));
+    }
+  }
+  ~ThreadPool() {
+    {
+      std::unique_lock<std::mutex> lock(queue_mutex);
+      terminated = true;
+    }
+    condition.notify_all();
+    for (std::thread& thread : threads) {
+      thread.join();
+    }
+  }
+  void enqueue(const std::function<void()>& job) {
+    {
+      std::unique_lock<std::mutex> lock(queue_mutex);
+      if (terminated)
+        throw std::runtime_error("enqueue on stopped ThreadPool");
+      jobs.push(job);
+    }
+    condition.notify_one();
+  }
+
+ private:
+  void ThreadLoop() {
+    while (true) {
+      std::function<void()> job;
+      {
+        std::unique_lock<std::mutex> lock(queue_mutex);
+        condition.wait(lock, [this] { return !jobs.empty() || terminated; });
+        if (terminated && jobs.empty())
+          return;
+        job = jobs.front();
+        jobs.pop();
+      }
+      job();
+    }
+  }
+
+  bool terminated = false;
+  std::mutex queue_mutex;
+  std::condition_variable condition;
+  std::vector<std::thread> threads;
+  std::queue<std::function<void()>> jobs;
+};
+
+void thread_func(int job_index) {
+  std::cout << __FUNCTION__ << " (job index = " << job_index
+            << ") running on thread " << std::this_thread::get_id() << "\n";
+  std::cout << "Calling function plus_one(int)\nResult = "
+            << plus_one(job_index) << "\n";
+}
+
+void thread_sleep(int job_index, int sleep_sec) {
+  std::cout << __FUNCTION__ << " (job index = " << job_index
+            << ") starting on thread " << std::this_thread::get_id() << "\n";
+  std::this_thread::sleep_for(std::chrono::seconds(sleep_sec));
+  std::cout << __FUNCTION__ << " (job index = " << job_index
+            << ") finished on thread " << std::this_thread::get_id() << "\n";
+}
+
+int main() {
+  ThreadPool tp1(2000);
+  ThreadPool tp2;
+  std::cout << "main() running on thread " << std::this_thread::get_id()
+            << "\n";
+  std::cout
+      << "Program is expecting stdin. Please attach the debugger and hit enter to continue\n";
+  std::cin.get();
+  // At least one of the threads in tp1 will be scheduled with a task twice or
+  // more if num_jobs is larger than the number of threads in tp1.
+  int num_jobs = 3000;
+  for (int i = 0; i < num_jobs; ++i) {
+    tp1.enqueue(std::bind(thread_func, i));
+  }
+  // We may or may not hit a breakpoint in thread_sleep here, as the thread
+  // pool might finish executing before the debugger releases the breakpoint
+  // for tp1. To make sure we hit the breakpoint, increase the sleep time.
+  for (int i = 0; i < num_jobs; ++i) {
+    tp2.enqueue([i] { thread_sleep(i, 1); });
+  }
+  for (int i = 0; i < num_jobs; ++i) {
+    tp1.enqueue(std::bind(thread_sleep, i, 1));
+  }
+  return 0;
+}
diff --git a/lldb/tools/lldb-dap/Handler/AttachRequestHandler.cpp b/lldb/tools/lldb-dap/Handler/AttachRequestHandler.cpp
index 5e622f3d3dcd4..aa7f3c0d57f9d 100644
--- a/lldb/tools/lldb-dap/Handler/AttachRequestHandler.cpp
+++ b/lldb/tools/lldb-dap/Handler/AttachRequestHandler.cpp
@@ -73,9 +73,11 @@ void AttachRequestHandler::operator()(const llvm::json::Object &request) const {
   llvm::StringRef core_file = GetString(arguments, "coreFile").value_or("");
   const uint64_t timeout_seconds =
       GetInteger<uint64_t>(arguments, "timeout").value_or(30);
-  dap.stop_at_entry = core_file.empty()
-                          ? GetBoolean(arguments, "stopOnEntry").value_or(false)
-                          : true;
+  // Clients like VS Code send a `threads` request right after receiving the
+  // configurationDone response, while the process might still be resuming.
+  // Getting the thread list from a running process is not supported by LLDB.
+  // Always stop the process after attaching.
+  dap.stop_at_entry = true;
   dap.configuration.postRunCommands = GetStrings(arguments, "postRunCommands");
   const llvm::StringRef debuggerRoot =
       GetString(arguments, "debuggerRoot").value_or("");
``````````
https://github.com/llvm/llvm-project/pull/134339