[Mlir-commits] [mlir] fb46e64 - Revert "[ThreadPool] Do not return shared futures."
Florian Hahn
llvmlistbot at llvm.org
Wed Nov 24 11:02:10 PST 2021
Author: Florian Hahn
Date: 2021-11-24T19:01:47Z
New Revision: fb46e64a013a2c7457db3312da07db1b25ad5219
URL: https://github.com/llvm/llvm-project/commit/fb46e64a013a2c7457db3312da07db1b25ad5219
DIFF: https://github.com/llvm/llvm-project/commit/fb46e64a013a2c7457db3312da07db1b25ad5219.diff
LOG: Revert "[ThreadPool] Do not return shared futures."
This reverts commit a5fff58781f30ff3fd7a3f56948552cf7b8842bb.
The offending commit broke building with LLVM_ENABLE_THREADS=OFF.
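For context, the threads-off path stores pending work in a std::function<void()> that copy-captures the task's future, and std::function requires a copyable callable; a plain std::future is move-only, which is presumably why that configuration stopped compiling. A minimal standalone sketch (not LLVM code) of that failure mode:

// Presumed failure mode with LLVM_ENABLE_THREADS=OFF: the single-threaded
// path queues work as std::function<void()>, which needs a copyable capture,
// so the stored future has to be a shared_future.
#include <functional>
#include <future>
#include <queue>

int main() {
  std::queue<std::function<void()>> Tasks;

  // Copyable capture: compiles, mirrors the code restored by this revert.
  std::shared_future<void> Shared =
      std::async(std::launch::deferred, [] {}).share();
  Tasks.push([Shared] { Shared.get(); });

  // Move-only capture: the lambda cannot be wrapped in std::function<void()>,
  // which is presumably what broke the threads-off build.
  // std::future<void> Plain = std::async(std::launch::deferred, [] {});
  // Tasks.push([Plain = std::move(Plain)]() mutable { Plain.get(); }); // error

  while (!Tasks.empty()) {
    Tasks.front()();
    Tasks.pop();
  }
  return 0;
}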
Added:
Modified:
llvm/include/llvm/Support/ThreadPool.h
llvm/tools/llvm-reduce/deltas/Delta.cpp
mlir/include/mlir/IR/Threading.h
Removed:
################################################################################
diff --git a/llvm/include/llvm/Support/ThreadPool.h b/llvm/include/llvm/Support/ThreadPool.h
index 1a51083cf8397..4f6ccc069f0fc 100644
--- a/llvm/include/llvm/Support/ThreadPool.h
+++ b/llvm/include/llvm/Support/ThreadPool.h
@@ -56,7 +56,8 @@ class ThreadPool {
/// Asynchronous submission of a task to the pool. The returned future can be
/// used to wait for the task to finish and is *non-blocking* on destruction.
- template <typename Func> auto async(Func &&F) -> std::future<decltype(F())> {
+ template <typename Func>
+ auto async(Func &&F) -> std::shared_future<decltype(F())> {
return asyncImpl(std::function<decltype(F())()>(std::forward<Func>(F)));
}
@@ -100,7 +101,7 @@ class ThreadPool {
/// Asynchronous submission of a task to the pool. The returned future can be
/// used to wait for the task to finish and is *non-blocking* on destruction.
template <typename ResTy>
- std::future<ResTy> asyncImpl(std::function<ResTy()> Task) {
+ std::shared_future<ResTy> asyncImpl(std::function<ResTy()> Task) {
#if LLVM_ENABLE_THREADS
/// Wrap the Task in a std::function<void()> that sets the result of the
@@ -116,12 +117,13 @@ class ThreadPool {
Tasks.push(std::move(R.first));
}
QueueCondition.notify_one();
- return std::move(R.second);
+ return R.second.share();
#else // LLVM_ENABLE_THREADS Disabled
// Get a Future with launch::deferred execution using std::async
- auto Future = std::async(std::launch::deferred, std::move(Task));
+ std::shared_future<void> Future =
+ std::async(std::launch::deferred, std::move(Task)).share();
// Wrap the future so that both ThreadPool::wait() can operate and the
// returned future can be sync'ed on.
Tasks.push([Future]() { Future.get(); });
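Caller-side view of the restored interface, as a hedged sketch (computeAnswer and example are illustrative names, not LLVM code): async() again hands back a std::shared_future, which is copyable and can be waited on from more than one place.

#include "llvm/Support/ThreadPool.h"

static int computeAnswer() { return 42; }

void example() {
  llvm::ThreadPool Pool;
  // With this revert, async() returns std::shared_future<int> again.
  std::shared_future<int> Result = Pool.async([] { return computeAnswer(); });
  std::shared_future<int> Copy = Result; // copyable, unlike std::future
  int A = Result.get();
  int B = Copy.get(); // get() is valid on every copy
  (void)A;
  (void)B;
  Pool.wait();
}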
diff --git a/llvm/tools/llvm-reduce/deltas/Delta.cpp b/llvm/tools/llvm-reduce/deltas/Delta.cpp
index 4e1bd6d4f495c..8666d8baac66d 100644
--- a/llvm/tools/llvm-reduce/deltas/Delta.cpp
+++ b/llvm/tools/llvm-reduce/deltas/Delta.cpp
@@ -267,7 +267,7 @@ void runDeltaPassInt(
WriteBitcodeToFile(*Test.getProgram().M, BCOS);
}
- std::deque<std::future<SmallString<0>>> TaskQueue;
+ std::deque<std::shared_future<SmallString<0>>> TaskQueue;
for (auto I = ChunksStillConsideredInteresting.rbegin(),
E = ChunksStillConsideredInteresting.rend();
I != E; ++I) {
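A hedged sketch of the pattern that hunk relies on: push ThreadPool jobs into a deque of shared futures and drain them in submission order. The names Pool, runOneCandidate, and drainInOrder are illustrative, not the actual llvm-reduce code.

#include "llvm/ADT/SmallString.h"
#include "llvm/Support/ThreadPool.h"
#include <deque>
#include <string>

// Illustrative stand-in for one speculative reduction attempt.
llvm::SmallString<0> runOneCandidate(unsigned Index) {
  llvm::SmallString<0> Result;
  Result += "candidate #";
  Result += std::to_string(Index);
  return Result;
}

void drainInOrder(llvm::ThreadPool &Pool) {
  // Same shape as the TaskQueue above: shared futures of the task results,
  // consumed strictly front to back.
  std::deque<std::shared_future<llvm::SmallString<0>>> TaskQueue;
  for (unsigned I = 0; I != 8; ++I)
    TaskQueue.push_back(Pool.async([I] { return runOneCandidate(I); }));
  while (!TaskQueue.empty()) {
    llvm::SmallString<0> Result = TaskQueue.front().get();
    TaskQueue.pop_front();
    (void)Result;
  }
}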
diff --git a/mlir/include/mlir/IR/Threading.h b/mlir/include/mlir/IR/Threading.h
index 40ace73418bb8..384223161014f 100644
--- a/mlir/include/mlir/IR/Threading.h
+++ b/mlir/include/mlir/IR/Threading.h
@@ -71,14 +71,14 @@ LogicalResult failableParallelForEach(MLIRContext *context, IteratorT begin,
// Otherwise, process the elements in parallel.
llvm::ThreadPool &threadPool = context->getThreadPool();
size_t numActions = std::min(numElements, threadPool.getThreadCount());
- SmallVector<std::future<void>> threadFutures;
+ SmallVector<std::shared_future<void>> threadFutures;
threadFutures.reserve(numActions - 1);
for (unsigned i = 1; i < numActions; ++i)
threadFutures.emplace_back(threadPool.async(processFn));
processFn();
// Wait for all of the threads to finish.
- for (std::future<void> &future : threadFutures)
+ for (std::shared_future<void> &future : threadFutures)
future.wait();
return failure(processingFailed);
}
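For reference, a hedged usage sketch of the MLIR helper touched above (processAll, ctx, and values are illustrative names): failableParallelForEach runs the callback over the range on the context's thread pool when threading is enabled and folds the per-element results into a single LogicalResult.

#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Threading.h"
#include "mlir/Support/LogicalResult.h"
#include <vector>

mlir::LogicalResult processAll(mlir::MLIRContext *ctx,
                               std::vector<int> &values) {
  return mlir::failableParallelForEach(ctx, values, [](int v) {
    // Any element returning failure() makes the whole call report failure.
    return v >= 0 ? mlir::success() : mlir::failure();
  });
}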