[llvm] r334658 - Revert "Enable ThreadPool to queue tasks that return values."
Zachary Turner via llvm-commits
llvm-commits at lists.llvm.org
Wed Jun 13 14:24:19 PDT 2018
Author: zturner
Date: Wed Jun 13 14:24:19 2018
New Revision: 334658
URL: http://llvm.org/viewvc/llvm-project?rev=334658&view=rev
Log:
Revert "Enable ThreadPool to queue tasks that return values."
This fails to compile when LLVM_ENABLE_THREADS is false, and the
fix is not immediately obvious, so I am reverting while I look
into it.
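For context: when LLVM_ENABLE_THREADS is 0, lib/Support/ThreadPool.cpp compiles a separate single-threaded fallback that the reverted patch presumably did not update; it still spells every queued task as PackagedTaskTy rather than the new type-erased TaskBase queue. A rough sketch of that fallback, assuming it looks roughly like this at this revision (not part of this commit):

  std::shared_future<void> ThreadPool::asyncImpl(TaskTy Task) {
    // Defer the work; nothing runs until the returned future is forced.
    auto Future = std::async(std::launch::deferred, std::move(Task)).share();
    // Queue a void() wrapper so ThreadPool::wait() can force the deferred
    // future on the calling thread.
    Tasks.push(PackagedTaskTy([Future] { Future.get(); }));
    return Future;
  }

  void ThreadPool::wait() {
    // Sequential implementation: drain the queue on the calling thread.
    while (!Tasks.empty()) {
      auto Task = std::move(Tasks.front());
      Tasks.pop();
      Task();
    }
  }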
Modified:
llvm/trunk/include/llvm/Support/ThreadPool.h
llvm/trunk/lib/Support/ThreadPool.cpp
llvm/trunk/unittests/Support/ThreadPool.cpp
Modified: llvm/trunk/include/llvm/Support/ThreadPool.h
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/Support/ThreadPool.h?rev=334658&r1=334657&r2=334658&view=diff
==============================================================================
--- llvm/trunk/include/llvm/Support/ThreadPool.h (original)
+++ llvm/trunk/include/llvm/Support/ThreadPool.h Wed Jun 13 14:24:19 2018
@@ -14,14 +14,12 @@
#ifndef LLVM_SUPPORT_THREAD_POOL_H
#define LLVM_SUPPORT_THREAD_POOL_H
-#include "llvm/ADT/STLExtras.h"
#include "llvm/Config/llvm-config.h"
#include "llvm/Support/thread.h"
#include <future>
#include <atomic>
-#include <cassert>
#include <condition_variable>
#include <functional>
#include <memory>
@@ -37,21 +35,10 @@ namespace llvm {
/// The pool keeps a vector of threads alive, waiting on a condition variable
/// for some work to become available.
class ThreadPool {
- struct TaskBase {
- virtual ~TaskBase() {}
- virtual void execute() = 0;
- };
-
- template <typename ReturnType> struct TypedTask : public TaskBase {
- explicit TypedTask(std::packaged_task<ReturnType()> Task)
- : Task(std::move(Task)) {}
-
- void execute() override { Task(); }
-
- std::packaged_task<ReturnType()> Task;
- };
-
public:
+ using TaskTy = std::function<void()>;
+ using PackagedTaskTy = std::packaged_task<void()>;
+
/// Construct a pool with the number of threads found by
/// hardware_concurrency().
ThreadPool();
@@ -65,8 +52,7 @@ public:
/// Asynchronous submission of a task to the pool. The returned future can be
/// used to wait for the task to finish and is *non-blocking* on destruction.
template <typename Function, typename... Args>
- inline std::shared_future<typename std::result_of<Function(Args...)>::type>
- async(Function &&F, Args &&... ArgList) {
+ inline std::shared_future<void> async(Function &&F, Args &&... ArgList) {
auto Task =
std::bind(std::forward<Function>(F), std::forward<Args>(ArgList)...);
return asyncImpl(std::move(Task));
@@ -75,8 +61,7 @@ public:
/// Asynchronous submission of a task to the pool. The returned future can be
/// used to wait for the task to finish and is *non-blocking* on destruction.
template <typename Function>
- inline std::shared_future<typename std::result_of<Function()>::type>
- async(Function &&F) {
+ inline std::shared_future<void> async(Function &&F) {
return asyncImpl(std::forward<Function>(F));
}
@@ -87,35 +72,13 @@ public:
private:
/// Asynchronous submission of a task to the pool. The returned future can be
/// used to wait for the task to finish and is *non-blocking* on destruction.
- template <typename TaskTy>
- std::shared_future<typename std::result_of<TaskTy()>::type>
- asyncImpl(TaskTy &&Task) {
- typedef decltype(Task()) ResultTy;
-
- /// Wrap the Task in a packaged_task to return a future object.
- std::packaged_task<ResultTy()> PackagedTask(std::move(Task));
- auto Future = PackagedTask.get_future();
- std::unique_ptr<TaskBase> TB =
- llvm::make_unique<TypedTask<ResultTy>>(std::move(PackagedTask));
-
- {
- // Lock the queue and push the new task
- std::unique_lock<std::mutex> LockGuard(QueueLock);
-
- // Don't allow enqueueing after disabling the pool
- assert(EnableFlag && "Queuing a thread during ThreadPool destruction");
-
- Tasks.push(std::move(TB));
- }
- QueueCondition.notify_one();
- return Future.share();
- }
+ std::shared_future<void> asyncImpl(TaskTy F);
/// Threads in flight
std::vector<llvm::thread> Threads;
/// Tasks waiting for execution in the pool.
- std::queue<std::unique_ptr<TaskBase>> Tasks;
+ std::queue<PackagedTaskTy> Tasks;
/// Locking and signaling for accessing the Tasks queue.
std::mutex QueueLock;
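A minimal call-site sketch (not from this commit) contrasting the restored interface with the reverted one. With the code as restored here, async() always yields std::shared_future<void>, so a task's result has to travel through captured state:

  #include "llvm/Support/ThreadPool.h"
  #include <atomic>

  void example() {
    llvm::ThreadPool Pool;
    std::atomic<int> Result{0};

    // Restored interface: the return value of the callable is discarded.
    std::shared_future<void> Done = Pool.async([&Result] { Result = 42; });
    Done.wait();

    // The reverted patch would instead have deduced the result type:
    //   std::shared_future<int> R = Pool.async([] { return 42; });
    //   int Value = R.get();
    Pool.wait();
  }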
Modified: llvm/trunk/lib/Support/ThreadPool.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/Support/ThreadPool.cpp?rev=334658&r1=334657&r2=334658&view=diff
==============================================================================
--- llvm/trunk/lib/Support/ThreadPool.cpp (original)
+++ llvm/trunk/lib/Support/ThreadPool.cpp Wed Jun 13 14:24:19 2018
@@ -32,7 +32,7 @@ ThreadPool::ThreadPool(unsigned ThreadCo
for (unsigned ThreadID = 0; ThreadID < ThreadCount; ++ThreadID) {
Threads.emplace_back([&] {
while (true) {
- std::unique_ptr<TaskBase> Task;
+ PackagedTaskTy Task;
{
std::unique_lock<std::mutex> LockGuard(QueueLock);
// Wait for tasks to be pushed in the queue
@@ -54,7 +54,7 @@ ThreadPool::ThreadPool(unsigned ThreadCo
Tasks.pop();
}
// Run the task we just grabbed
- Task->execute();
+ Task();
{
// Adjust `ActiveThreads`, in case someone waits on ThreadPool::wait()
@@ -79,6 +79,23 @@ void ThreadPool::wait() {
[&] { return !ActiveThreads && Tasks.empty(); });
}
+std::shared_future<void> ThreadPool::asyncImpl(TaskTy Task) {
+ /// Wrap the Task in a packaged_task to return a future object.
+ PackagedTaskTy PackagedTask(std::move(Task));
+ auto Future = PackagedTask.get_future();
+ {
+ // Lock the queue and push the new task
+ std::unique_lock<std::mutex> LockGuard(QueueLock);
+
+ // Don't allow enqueueing after disabling the pool
+ assert(EnableFlag && "Queuing a thread during ThreadPool destruction");
+
+ Tasks.push(std::move(PackagedTask));
+ }
+ QueueCondition.notify_one();
+ return Future.share();
+}
+
// The destructor joins all threads, waiting for completion.
ThreadPool::~ThreadPool() {
{
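The TaskBase/TypedTask machinery removed above is a standard type-erasure idiom: std::packaged_task<R()> for different R cannot share one concrete queue element type, so each task is hidden behind a common virtual interface and executed through it. A standalone illustration of the idiom (not LLVM code):

  #include <future>
  #include <memory>
  #include <queue>

  struct TaskBase {
    virtual ~TaskBase() = default;
    virtual void execute() = 0;
  };

  template <typename R> struct TypedTask : TaskBase {
    explicit TypedTask(std::packaged_task<R()> T) : Task(std::move(T)) {}
    void execute() override { Task(); }
    std::packaged_task<R()> Task;
  };

  int main() {
    std::queue<std::unique_ptr<TaskBase>> Tasks;

    std::packaged_task<int()> P([] { return 42; });
    std::future<int> F = P.get_future();
    Tasks.push(std::make_unique<TypedTask<int>>(std::move(P)));

    // A worker only needs the erased interface to run the task; the caller
    // still observes the typed result through the future.
    Tasks.front()->execute();
    return F.get() == 42 ? 0 : 1;
  }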
Modified: llvm/trunk/unittests/Support/ThreadPool.cpp
URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/unittests/Support/ThreadPool.cpp?rev=334658&r1=334657&r2=334658&view=diff
==============================================================================
--- llvm/trunk/unittests/Support/ThreadPool.cpp (original)
+++ llvm/trunk/unittests/Support/ThreadPool.cpp Wed Jun 13 14:24:19 2018
@@ -147,25 +147,6 @@ TEST_F(ThreadPoolTest, GetFuture) {
ASSERT_EQ(2, i.load());
}
-TEST_F(ThreadPoolTest, TaskWithResult) {
- CHECK_UNSUPPORTED();
- // By making only 1 thread in the pool the two tasks are serialized with
- // respect to each other, which means that the second one must return 2.
- ThreadPool Pool{1};
- std::atomic_int i{0};
- Pool.async([this, &i] {
- waitForMainThread();
- ++i;
- });
- // Force the future using get()
- std::shared_future<int> Future = Pool.async([&i] { return ++i; });
- ASSERT_EQ(0, i.load());
- setMainThreadReady();
- int Result = Future.get();
- ASSERT_EQ(2, i.load());
- ASSERT_EQ(2, Result);
-}
-
TEST_F(ThreadPoolTest, PoolDestruction) {
CHECK_UNSUPPORTED();
// Test that we are waiting on destruction
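With TaskWithResult gone, the remaining tests only exercise the void interface. A workaround sketch for callers that still need a result (an assumption on my part, not part of LLVM's tests): hand the value back through a std::promise captured by the task.

  #include "llvm/Support/ThreadPool.h"
  #include <future>

  int computeViaPool(llvm::ThreadPool &Pool) {
    std::promise<int> Result;
    std::future<int> ResultFuture = Result.get_future();
    Pool.async([&Result] { Result.set_value(2 * 21); });
    // get() blocks until the task has run, so capturing Result by reference
    // is safe for the duration of this call.
    return ResultFuture.get(); // 42
  }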