[llvm] a5fff58 - [ThreadPool] Do not return shared futures.

Florian Hahn via llvm-commits llvm-commits at lists.llvm.org
Tue Nov 23 02:06:31 PST 2021


Author: Florian Hahn
Date: 2021-11-23T10:06:08Z
New Revision: a5fff58781f30ff3fd7a3f56948552cf7b8842bb

URL: https://github.com/llvm/llvm-project/commit/a5fff58781f30ff3fd7a3f56948552cf7b8842bb
DIFF: https://github.com/llvm/llvm-project/commit/a5fff58781f30ff3fd7a3f56948552cf7b8842bb.diff

LOG: [ThreadPool] Do not return shared futures.

The only user of returned futures from ThreadPool is llvm-reduce after
D113857.

There should be no cases where multiple threads wait on the same future,
so there should be no need to return std::shared_future<>. Instead return
plain std::future<>.

If users need to share a future between multiple threads, they can share
the futures themselves.

Reviewed By: Meinersbur, mehdi_amini

Differential Revision: https://reviews.llvm.org/D114363

Added: 
    

Modified: 
    llvm/include/llvm/Support/ThreadPool.h
    mlir/include/mlir/IR/Threading.h

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Support/ThreadPool.h b/llvm/include/llvm/Support/ThreadPool.h
index 8d30e8e92755a..1a51083cf8397 100644
--- a/llvm/include/llvm/Support/ThreadPool.h
+++ b/llvm/include/llvm/Support/ThreadPool.h
@@ -56,8 +56,7 @@ class ThreadPool {
 
   /// Asynchronous submission of a task to the pool. The returned future can be
   /// used to wait for the task to finish and is *non-blocking* on destruction.
-  template <typename Func>
-  auto async(Func &&F) -> std::shared_future<decltype(F())> {
+  template <typename Func> auto async(Func &&F) -> std::future<decltype(F())> {
     return asyncImpl(std::function<decltype(F())()>(std::forward<Func>(F)));
   }
 
@@ -101,7 +100,7 @@ class ThreadPool {
   /// Asynchronous submission of a task to the pool. The returned future can be
   /// used to wait for the task to finish and is *non-blocking* on destruction.
   template <typename ResTy>
-  std::shared_future<ResTy> asyncImpl(std::function<ResTy()> Task) {
+  std::future<ResTy> asyncImpl(std::function<ResTy()> Task) {
 
 #if LLVM_ENABLE_THREADS
     /// Wrap the Task in a std::function<void()> that sets the result of the
@@ -117,12 +116,12 @@ class ThreadPool {
       Tasks.push(std::move(R.first));
     }
     QueueCondition.notify_one();
-    return R.second.share();
+    return std::move(R.second);
 
 #else // LLVM_ENABLE_THREADS Disabled
 
     // Get a Future with launch::deferred execution using std::async
-    auto Future = std::async(std::launch::deferred, std::move(Task)).share();
+    auto Future = std::async(std::launch::deferred, std::move(Task));
     // Wrap the future so that both ThreadPool::wait() can operate and the
     // returned future can be sync'ed on.
     Tasks.push([Future]() { Future.get(); });

diff --git a/mlir/include/mlir/IR/Threading.h b/mlir/include/mlir/IR/Threading.h
index 384223161014f..40ace73418bb8 100644
--- a/mlir/include/mlir/IR/Threading.h
+++ b/mlir/include/mlir/IR/Threading.h
@@ -71,14 +71,14 @@ LogicalResult failableParallelForEach(MLIRContext *context, IteratorT begin,
   // Otherwise, process the elements in parallel.
   llvm::ThreadPool &threadPool = context->getThreadPool();
   size_t numActions = std::min(numElements, threadPool.getThreadCount());
-  SmallVector<std::shared_future<void>> threadFutures;
+  SmallVector<std::future<void>> threadFutures;
   threadFutures.reserve(numActions - 1);
   for (unsigned i = 1; i < numActions; ++i)
     threadFutures.emplace_back(threadPool.async(processFn));
   processFn();
 
   // Wait for all of the threads to finish.
-  for (std::shared_future<void> &future : threadFutures)
+  for (std::future<void> &future : threadFutures)
     future.wait();
   return failure(processingFailed);
 }


        


More information about the llvm-commits mailing list