[llvm] Use `llvm::unique_function` in the async APIs (PR #166727)

Chandler Carruth via llvm-commits llvm-commits at lists.llvm.org
Thu Nov 6 15:59:42 PST 2025


https://github.com/chandlerc updated https://github.com/llvm/llvm-project/pull/166727

>From 5f2890b033b8e0f447e73b4992d8f93bbc719f5c Mon Sep 17 00:00:00 2001
From: Chandler Carruth <chandlerc at gmail.com>
Date: Thu, 6 Nov 2025 01:54:15 -0600
Subject: [PATCH 1/2] Use `llvm::unique_function` in the async APIs

This is needed to allow these APIs to be used with callable objects that
transitively capture move-only types. Such captures come up very often
when writing concurrent code using `std::future`, `std::promise`,
`std::unique_lock`, etc.
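
For illustration only (not part of the patch), a minimal standalone sketch of
the kind of move-only callable this enables; the lambda owns a `std::promise`,
so it cannot be stored in a copyable `std::function`, but it can be stored in
`llvm::unique_function`:

  #include "llvm/ADT/FunctionExtras.h"
  #include <future>
  #include <utility>

  int main() {
    std::promise<int> P;
    std::future<int> F = P.get_future();

    // std::function<void()> Task = [P = std::move(P)]() mutable { ... };
    //   ^ would not compile: std::function requires a copyable target.
    llvm::unique_function<void()> Task = [P = std::move(P)]() mutable {
      P.set_value(42);
    };

    Task();
    return F.get() == 42 ? 0 : 1;
  }

Because `llvm::unique_function` only requires its target to be movable, the
ThreadPool async APIs can accept such callables directly once they take
`llvm::unique_function` instead of `std::function`.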
---
 llvm/include/llvm/Support/ThreadPool.h | 23 +++++++++++++----------
 llvm/lib/Support/ThreadPool.cpp        |  4 ++--
 2 files changed, 15 insertions(+), 12 deletions(-)

diff --git a/llvm/include/llvm/Support/ThreadPool.h b/llvm/include/llvm/Support/ThreadPool.h
index c20efc7396b79..d3276a18dc2c6 100644
--- a/llvm/include/llvm/Support/ThreadPool.h
+++ b/llvm/include/llvm/Support/ThreadPool.h
@@ -14,6 +14,7 @@
 #define LLVM_SUPPORT_THREADPOOL_H
 
 #include "llvm/ADT/DenseMap.h"
+#include "llvm/ADT/FunctionExtras.h"
 #include "llvm/Config/llvm-config.h"
 #include "llvm/Support/Compiler.h"
 #include "llvm/Support/Jobserver.h"
@@ -51,7 +52,7 @@ class ThreadPoolTaskGroup;
 class LLVM_ABI ThreadPoolInterface {
   /// The actual method to enqueue a task to be defined by the concrete
   /// implementation.
-  virtual void asyncEnqueue(std::function<void()> Task,
+  virtual void asyncEnqueue(llvm::unique_function<void()> Task,
                             ThreadPoolTaskGroup *Group) = 0;
 
 public:
@@ -95,22 +96,22 @@ class LLVM_ABI ThreadPoolInterface {
   /// used to wait for the task to finish and is *non-blocking* on destruction.
   template <typename Func>
   auto async(Func &&F) -> std::shared_future<decltype(F())> {
-    return asyncImpl(std::function<decltype(F())()>(std::forward<Func>(F)),
-                     nullptr);
+    return asyncImpl(
+        llvm::unique_function<decltype(F())()>(std::forward<Func>(F)), nullptr);
   }
 
   template <typename Func>
   auto async(ThreadPoolTaskGroup &Group, Func &&F)
       -> std::shared_future<decltype(F())> {
-    return asyncImpl(std::function<decltype(F())()>(std::forward<Func>(F)),
-                     &Group);
+    return asyncImpl(
+        llvm::unique_function<decltype(F())()>(std::forward<Func>(F)), &Group);
   }
 
 private:
   /// Asynchronous submission of a task to the pool. The returned future can be
   /// used to wait for the task to finish and is *non-blocking* on destruction.
   template <typename ResTy>
-  std::shared_future<ResTy> asyncImpl(std::function<ResTy()> Task,
+  std::shared_future<ResTy> asyncImpl(llvm::unique_function<ResTy()> Task,
                                       ThreadPoolTaskGroup *Group) {
     auto Future = std::async(std::launch::deferred, std::move(Task)).share();
     asyncEnqueue([Future]() { Future.wait(); }, Group);
@@ -160,7 +161,7 @@ class LLVM_ABI StdThreadPool : public ThreadPoolInterface {
 
   /// Asynchronous submission of a task to the pool. The returned future can be
   /// used to wait for the task to finish and is *non-blocking* on destruction.
-  void asyncEnqueue(std::function<void()> Task,
+  void asyncEnqueue(llvm::unique_function<void()> Task,
                     ThreadPoolTaskGroup *Group) override {
     int requestedThreads;
     {
@@ -189,7 +190,8 @@ class LLVM_ABI StdThreadPool : public ThreadPoolInterface {
   mutable llvm::sys::RWMutex ThreadsLock;
 
   /// Tasks waiting for execution in the pool.
-  std::deque<std::pair<std::function<void()>, ThreadPoolTaskGroup *>> Tasks;
+  std::deque<std::pair<llvm::unique_function<void()>, ThreadPoolTaskGroup *>>
+      Tasks;
 
   /// Locking and signaling for accessing the Tasks queue.
   std::mutex QueueLock;
@@ -239,13 +241,14 @@ class LLVM_ABI SingleThreadExecutor : public ThreadPoolInterface {
 private:
   /// Asynchronous submission of a task to the pool. The returned future can be
   /// used to wait for the task to finish and is *non-blocking* on destruction.
-  void asyncEnqueue(std::function<void()> Task,
+  void asyncEnqueue(llvm::unique_function<void()> Task,
                     ThreadPoolTaskGroup *Group) override {
     Tasks.emplace_back(std::make_pair(std::move(Task), Group));
   }
 
   /// Tasks waiting for execution in the pool.
-  std::deque<std::pair<std::function<void()>, ThreadPoolTaskGroup *>> Tasks;
+  std::deque<std::pair<llvm::unique_function<void()>, ThreadPoolTaskGroup *>>
+      Tasks;
 };
 
 #if LLVM_ENABLE_THREADS
diff --git a/llvm/lib/Support/ThreadPool.cpp b/llvm/lib/Support/ThreadPool.cpp
index 69602688cf3fd..4779e673cc055 100644
--- a/llvm/lib/Support/ThreadPool.cpp
+++ b/llvm/lib/Support/ThreadPool.cpp
@@ -73,7 +73,7 @@ static LLVM_THREAD_LOCAL std::vector<ThreadPoolTaskGroup *>
 // WaitingForGroup == nullptr means all tasks regardless of their group.
 void StdThreadPool::processTasks(ThreadPoolTaskGroup *WaitingForGroup) {
   while (true) {
-    std::function<void()> Task;
+    llvm::unique_function<void()> Task;
     ThreadPoolTaskGroup *GroupOfTask;
     {
       std::unique_lock<std::mutex> LockGuard(QueueLock);
@@ -189,7 +189,7 @@ void StdThreadPool::processTasksWithJobserver() {
 
     // While we hold a job slot, process tasks from the internal queue.
     while (true) {
-      std::function<void()> Task;
+      llvm::unique_function<void()> Task;
       ThreadPoolTaskGroup *GroupOfTask = nullptr;
 
       {

>From 63b1f6de8e7037017e2b981ff6a8f1f5093b2100 Mon Sep 17 00:00:00 2001
From: Chandler Carruth <chandlerc at gmail.com>
Date: Thu, 6 Nov 2025 16:15:28 -0600
Subject: [PATCH 2/2] Add unit test

---
 llvm/unittests/Support/ThreadPool.cpp | 14 ++++++++++++++
 1 file changed, 14 insertions(+)

diff --git a/llvm/unittests/Support/ThreadPool.cpp b/llvm/unittests/Support/ThreadPool.cpp
index aa7f8744e1417..5612c95134ecb 100644
--- a/llvm/unittests/Support/ThreadPool.cpp
+++ b/llvm/unittests/Support/ThreadPool.cpp
@@ -183,6 +183,20 @@ TYPED_TEST(ThreadPoolTest, Async) {
   ASSERT_EQ(2, i.load());
 }
 
+TYPED_TEST(ThreadPoolTest, AsyncMoveOnly) {
+  CHECK_UNSUPPORTED();
+  DefaultThreadPool Pool;
+  std::promise<int> p;
+  std::future<int> f = p.get_future();
+  Pool.async([this, p = std::move(p)]() mutable {
+    this->waitForMainThread();
+    p.set_value(42);
+  });
+  this->setMainThreadReady();
+  Pool.wait();
+  ASSERT_EQ(42, f.get());
+}
+
 TYPED_TEST(ThreadPoolTest, GetFuture) {
   CHECK_UNSUPPORTED();
   DefaultThreadPool Pool(hardware_concurrency(2));


