<div dir="ltr">Ahh sorry, I will fix in a followup, trying to get the bots green at the moment.</div><br><div class="gmail_quote"><div dir="ltr">On Fri, May 5, 2017 at 2:28 PM Rui Ueyama <<a href="mailto:ruiu@google.com">ruiu@google.com</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex"><div dir="ltr"><div class="gmail_extra"><div class="gmail_quote">On Fri, May 5, 2017 at 2:09 PM, Zachary Turner via llvm-commits <span dir="ltr"><<a href="mailto:llvm-commits@lists.llvm.org" target="_blank">llvm-commits@lists.llvm.org</a>></span> wrote:<br><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">Author: zturner<br>
Date: Fri May 5 16:09:26 2017<br>
New Revision: 302288<br>
<br>
URL: http://llvm.org/viewvc/llvm-project?rev=302288&view=rev
Log:<br>
Split up Parallel and LLVM'ize naming conventions.<br>
<br>
This is one step in preparation for raising this up to
LLVM. It hides all of the Executor machinery in a private
implementation file, leaving only the core algorithms and
the TaskGroup class exposed. In doing so, fix up all the
variable names to conform to LLVM style.
<br>
Differential Revision: https://reviews.llvm.org/D32890
<br>
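For orientation: after this split, callers only see TaskGroup plus the parallel_* algorithms; the Executor machinery lives entirely in TaskGroup.cpp. Below is a minimal usage sketch of that surviving public surface. The includes and the lld:: entry points are taken from the diff; the function name, sample data, and comparator are invented for illustration, and this is not part of the commit itself.

```cpp
// Hypothetical caller of the public surface left in Parallel.h / TaskGroup.h.
#include "lld/Core/Parallel.h"
#include "lld/Core/TaskGroup.h"

#include <vector>

static void exampleUsage() {
  std::vector<int> Values = {42, 7, 19, 3};

  // parallel_sort dispatches to std::sort, ConcRT's parallel_sort, or the
  // thread-pool quicksort depending on LLVM_ENABLE_THREADS and the platform.
  lld::parallel_sort(Values.begin(), Values.end(),
                     [](int A, int B) { return A < B; });

  // TaskGroup is the only scheduling primitive still exposed: spawn tasks,
  // then wait for all of them via sync() (or implicitly in the destructor).
  lld::TaskGroup TG;
  for (int V : Values)
    TG.spawn([V] { (void)V; /* independent work per element */ });
  TG.sync();
}
```
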
Added:<br>
lld/trunk/include/lld/Core/TaskGroup.h<br>
lld/trunk/lib/Core/TaskGroup.cpp<br>
Modified:<br>
lld/trunk/include/lld/Core/Parallel.h<br>
lld/trunk/lib/Core/CMakeLists.txt<br>
lld/trunk/unittests/CoreTests/CMakeLists.txt<br>
<br>
Modified: lld/trunk/include/lld/Core/Parallel.h<br>
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/include/lld/Core/Parallel.h?rev=302288&r1=302287&r2=302288&view=diff
==============================================================================<br>
--- lld/trunk/include/lld/Core/Parallel.h (original)<br>
+++ lld/trunk/include/lld/Core/Parallel.h Fri May 5 16:09:26 2017<br>
@@ -10,16 +10,12 @@<br>
#ifndef LLD_CORE_PARALLEL_H<br>
#define LLD_CORE_PARALLEL_H<br>
<br>
-#include "lld/Core/Instrumentation.h"<br>
#include "lld/Core/LLVM.h"<br>
+#include "lld/Core/TaskGroup.h"<br>
#include "llvm/Support/MathExtras.h"<br>
-#include "llvm/Support/thread.h"<br>
+#include "llvm/config/llvm-config.h"<br>
<br>
#include <algorithm><br>
-#include <atomic><br>
-#include <condition_variable><br>
-#include <mutex><br>
-#include <stack><br>
<br>
#if defined(_MSC_VER) && LLVM_ENABLE_THREADS<br>
#include <concrt.h><br>
@@ -27,249 +23,84 @@<br>
#endif<br>
<br>
namespace lld {<br>
-/// \brief Allows one or more threads to wait on a potentially unknown number of<br>
-/// events.<br>
-///<br>
-/// A latch starts at \p count. inc() increments this, and dec() decrements it.<br>
-/// All calls to sync() will block while the count is not 0.<br>
-///<br>
-/// Calling dec() on a Latch with a count of 0 has undefined behaivor.<br>
-class Latch {<br>
- uint32_t _count;<br>
- mutable std::mutex _condMut;<br>
- mutable std::condition_variable _cond;<br>
-<br>
-public:<br>
- explicit Latch(uint32_t count = 0) : _count(count) {}<br>
- ~Latch() { sync(); }<br>
-<br>
- void inc() {<br>
- std::unique_lock<std::mutex> lock(_condMut);<br>
- ++_count;<br>
- }<br>
-<br>
- void dec() {<br>
- std::unique_lock<std::mutex> lock(_condMut);<br>
- if (--_count == 0)<br>
- _cond.notify_all();<br>
- }<br>
-<br>
- void sync() const {<br>
- std::unique_lock<std::mutex> lock(_condMut);<br>
- _cond.wait(lock, [&] {<br>
- return _count == 0;<br>
- });<br>
- }<br>
-};<br>
-<br>
-// Classes in this namespace are implementation details of this header.<br>
-namespace internal {<br>
-<br>
-/// \brief An abstract class that takes closures and runs them asynchronously.<br>
-class Executor {<br>
-public:<br>
- virtual ~Executor() = default;<br>
- virtual void add(std::function<void()> func) = 0;<br>
-};<br>
-<br>
-#if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0<br>
-class SyncExecutor : public Executor {<br>
-public:<br>
- virtual void add(std::function<void()> func) {<br>
- func();<br>
- }<br>
-};<br>
-<br>
-inline Executor *getDefaultExecutor() {<br>
- static SyncExecutor exec;<br>
- return &exec;<br>
-}<br>
-#elif defined(_MSC_VER)<br>
-/// \brief An Executor that runs tasks via ConcRT.<br>
-class ConcRTExecutor : public Executor {<br>
- struct Taskish {<br>
- Taskish(std::function<void()> task) : _task(task) {}<br>
-<br>
- std::function<void()> _task;<br>
-<br>
- static void run(void *p) {<br>
- Taskish *self = static_cast<Taskish *>(p);<br>
- self->_task();<br>
- concurrency::Free(self);<br>
- }<br>
- };<br>
-<br>
-public:<br>
- virtual void add(std::function<void()> func) {<br>
- Concurrency::CurrentScheduler::ScheduleTask(Taskish::run,<br>
- new (concurrency::Alloc(sizeof(Taskish))) Taskish(func));<br>
- }<br>
-};<br>
-<br>
-inline Executor *getDefaultExecutor() {<br>
- static ConcRTExecutor exec;<br>
- return &exec;<br>
-}<br>
-#else<br>
-/// \brief An implementation of an Executor that runs closures on a thread pool<br>
-/// in filo order.<br>
-class ThreadPoolExecutor : public Executor {<br>
-public:<br>
- explicit ThreadPoolExecutor(unsigned threadCount =<br>
- std::thread::hardware_concurrency())<br>
- : _stop(false), _done(threadCount) {<br>
- // Spawn all but one of the threads in another thread as spawning threads<br>
- // can take a while.<br>
- std::thread([&, threadCount] {<br>
- for (size_t i = 1; i < threadCount; ++i) {<br>
- std::thread([=] {<br>
- work();<br>
- }).detach();<br>
- }<br>
- work();<br>
- }).detach();<br>
- }<br>
-<br>
- ~ThreadPoolExecutor() override {<br>
- std::unique_lock<std::mutex> lock(_mutex);<br>
- _stop = true;<br>
- lock.unlock();<br>
- _cond.notify_all();<br>
- // Wait for ~Latch.<br>
- }<br>
-<br>
- void add(std::function<void()> f) override {<br>
- std::unique_lock<std::mutex> lock(_mutex);<br>
- _workStack.push(f);<br>
- lock.unlock();<br>
- _cond.notify_one();<br>
- }<br>
-<br>
-private:<br>
- void work() {<br>
- while (true) {<br>
- std::unique_lock<std::mutex> lock(_mutex);<br>
- _cond.wait(lock, [&] {<br>
- return _stop || !_workStack.empty();<br>
- });<br>
- if (_stop)<br>
- break;<br>
- auto task = _workStack.top();<br>
- _workStack.pop();<br>
- lock.unlock();<br>
- task();<br>
- }<br>
- _done.dec();<br>
- }<br>
-<br>
- std::atomic<bool> _stop;<br>
- std::stack<std::function<void()>> _workStack;<br>
- std::mutex _mutex;<br>
- std::condition_variable _cond;<br>
- Latch _done;<br>
-};<br>
-<br>
-inline Executor *getDefaultExecutor() {<br>
- static ThreadPoolExecutor exec;<br>
- return &exec;<br>
-}<br>
-#endif<br>
-<br>
-} // namespace internal<br>
-<br>
-/// \brief Allows launching a number of tasks and waiting for them to finish<br>
-/// either explicitly via sync() or implicitly on destruction.<br>
-class TaskGroup {<br>
- Latch _latch;

You forgot to fix this variable name.
-<br>
-public:<br>
- void spawn(std::function<void()> f) {<br>
- _latch.inc();<br>
- internal::getDefaultExecutor()->add([&, f] {<br>
- f();<br>
- _latch.dec();<br>
- });<br>
- }<br>
-<br>
- void sync() const { _latch.sync(); }<br>
-};<br>
<br>
-#if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0<br>
-template <class RandomAccessIterator, class Comp><br>
+#if !LLVM_ENABLE_THREADS<br>
+template <class RandomAccessIterator, class Comparator><br>
void parallel_sort(<br>
- RandomAccessIterator start, RandomAccessIterator end,<br>
- const Comp &comp = std::less<<br>
+ RandomAccessIterator Start, RandomAccessIterator End,<br>
+ const Comparator &Comp = std::less<<br>
typename std::iterator_traits<RandomAccessIterator>::value_type>()) {<br>
- std::sort(start, end, comp);<br>
+ std::sort(Start, End, Comp);<br>
}<br>
#elif defined(_MSC_VER)<br>
// Use ppl parallel_sort on Windows.<br>
-template <class RandomAccessIterator, class Comp><br>
+template <class RandomAccessIterator, class Comparator><br>
void parallel_sort(<br>
- RandomAccessIterator start, RandomAccessIterator end,<br>
- const Comp &comp = std::less<<br>
+ RandomAccessIterator Start, RandomAccessIterator End,<br>
+ const Comparator &Comp = std::less<<br>
typename std::iterator_traits<RandomAccessIterator>::value_type>()) {<br>
- concurrency::parallel_sort(start, end, comp);<br>
+ concurrency::parallel_sort(Start, End, Comp);<br>
}<br>
#else<br>
namespace detail {<br>
-const ptrdiff_t minParallelSize = 1024;<br>
+const ptrdiff_t MinParallelSize = 1024;<br>
<br>
/// \brief Inclusive median.<br>
-template <class RandomAccessIterator, class Comp><br>
-RandomAccessIterator medianOf3(RandomAccessIterator start,<br>
- RandomAccessIterator end, const Comp &comp) {<br>
- RandomAccessIterator mid = start + (std::distance(start, end) / 2);<br>
- return comp(*start, *(end - 1))<br>
- ? (comp(*mid, *(end - 1)) ? (comp(*start, *mid) ? mid : start)<br>
- : end - 1)<br>
- : (comp(*mid, *start) ? (comp(*(end - 1), *mid) ? mid : end - 1)<br>
- : start);<br>
+template <class RandomAccessIterator, class Comparator><br>
+RandomAccessIterator medianOf3(RandomAccessIterator Start,<br>
+ RandomAccessIterator End,<br>
+ const Comparator &Comp) {<br>
+ RandomAccessIterator Mid = Start + (std::distance(Start, End) / 2);<br>
+ return Comp(*Start, *(End - 1))<br>
+ ? (Comp(*Mid, *(End - 1)) ? (Comp(*Start, *Mid) ? Mid : Start)<br>
+ : End - 1)<br>
+ : (Comp(*Mid, *Start) ? (Comp(*(End - 1), *Mid) ? Mid : End - 1)<br>
+ : Start);<br>
}<br>
<br>
-template <class RandomAccessIterator, class Comp><br>
-void parallel_quick_sort(RandomAccessIterator start, RandomAccessIterator end,<br>
- const Comp &comp, TaskGroup &tg, size_t depth) {<br>
+template <class RandomAccessIterator, class Comparator><br>
+void parallel_quick_sort(RandomAccessIterator Start, RandomAccessIterator End,<br>
+ const Comparator &Comp, TaskGroup &TG, size_t Depth) {<br>
// Do a sequential sort for small inputs.<br>
- if (std::distance(start, end) < detail::minParallelSize || depth == 0) {<br>
- std::sort(start, end, comp);<br>
+ if (std::distance(Start, End) < detail::MinParallelSize || Depth == 0) {<br>
+ std::sort(Start, End, Comp);<br>
return;<br>
}<br>
<br>
// Partition.<br>
- auto pivot = medianOf3(start, end, comp);<br>
- // Move pivot to end.<br>
- std::swap(*(end - 1), *pivot);<br>
- pivot = std::partition(start, end - 1, [&comp, end](decltype(*start) v) {<br>
- return comp(v, *(end - 1));<br>
+ auto Pivot = medianOf3(Start, End, Comp);<br>
+ // Move Pivot to End.<br>
+ std::swap(*(End - 1), *Pivot);<br>
+ Pivot = std::partition(Start, End - 1, [&Comp, End](decltype(*Start) V) {<br>
+ return Comp(V, *(End - 1));<br>
});<br>
- // Move pivot to middle of partition.<br>
- std::swap(*pivot, *(end - 1));<br>
+ // Move Pivot to middle of partition.<br>
+ std::swap(*Pivot, *(End - 1));<br>
<br>
// Recurse.<br>
- tg.spawn([=, &comp, &tg] {<br>
- parallel_quick_sort(start, pivot, comp, tg, depth - 1);<br>
+ TG.spawn([=, &Comp, &TG] {<br>
+ parallel_quick_sort(Start, Pivot, Comp, TG, Depth - 1);<br>
});<br>
- parallel_quick_sort(pivot + 1, end, comp, tg, depth - 1);<br>
+ parallel_quick_sort(Pivot + 1, End, Comp, TG, Depth - 1);<br>
}<br>
}<br>
<br>
-template <class RandomAccessIterator, class Comp><br>
+template <class RandomAccessIterator, class Comparator><br>
void parallel_sort(<br>
- RandomAccessIterator start, RandomAccessIterator end,<br>
- const Comp &comp = std::less<<br>
+ RandomAccessIterator Start, RandomAccessIterator End,<br>
+ const Comparator &Comp = std::less<<br>
typename std::iterator_traits<RandomAccessIterator>::value_type>()) {<br>
- TaskGroup tg;<br>
- detail::parallel_quick_sort(start, end, comp, tg,<br>
- llvm::Log2_64(std::distance(start, end)) + 1);<br>
+ TaskGroup TG;<br>
+ detail::parallel_quick_sort(Start, End, Comp, TG,<br>
+ llvm::Log2_64(std::distance(Start, End)) + 1);<br>
}<br>
#endif<br>
<br>
-template <class T> void parallel_sort(T *start, T *end) {<br>
- parallel_sort(start, end, std::less<T>());<br>
+template <class T> void parallel_sort(T *Start, T *End) {<br>
+ parallel_sort(Start, End, std::less<T>());<br>
}<br>
<br>
-#if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0<br>
+#if !LLVM_ENABLE_THREADS<br>
template <class IterTy, class FuncTy><br>
void parallel_for_each(IterTy Begin, IterTy End, FuncTy Fn) {<br>
std::for_each(Begin, End, Fn);<br>
@@ -302,12 +133,12 @@ void parallel_for_each(IterTy Begin, Ite<br>
if (TaskSize == 0)<br>
TaskSize = 1;<br>
<br>
- TaskGroup Tg;<br>
+ TaskGroup TG;<br>
while (TaskSize <= std::distance(Begin, End)) {<br>
- Tg.spawn([=, &Fn] { std::for_each(Begin, Begin + TaskSize, Fn); });<br>
+ TG.spawn([=, &Fn] { std::for_each(Begin, Begin + TaskSize, Fn); });<br>
Begin += TaskSize;<br>
}<br>
- Tg.spawn([=, &Fn] { std::for_each(Begin, End, Fn); });<br>
+ TG.spawn([=, &Fn] { std::for_each(Begin, End, Fn); });<br>
}<br>
<br>
template <class IndexTy, class FuncTy><br>
@@ -316,20 +147,20 @@ void parallel_for(IndexTy Begin, IndexTy<br>
if (TaskSize == 0)<br>
TaskSize = 1;<br>
<br>
- TaskGroup Tg;<br>
+ TaskGroup TG;<br>
IndexTy I = Begin;<br>
for (; I + TaskSize < End; I += TaskSize) {<br>
- Tg.spawn([=, &Fn] {<br>
+ TG.spawn([=, &Fn] {<br>
for (IndexTy J = I, E = I + TaskSize; J != E; ++J)<br>
Fn(J);<br>
});<br>
}<br>
- Tg.spawn([=, &Fn] {<br>
+ TG.spawn([=, &Fn] {<br>
for (IndexTy J = I; J < End; ++J)<br>
Fn(J);<br>
});<br>
}<br>
#endif<br>
-} // end namespace lld<br>
+} // End namespace lld<br>
<br>
#endif // LLD_CORE_PARALLEL_H<br>
<br>
Added: lld/trunk/include/lld/Core/TaskGroup.h<br>
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/include/lld/Core/TaskGroup.h?rev=302288&view=auto
==============================================================================<br>
--- lld/trunk/include/lld/Core/TaskGroup.h (added)<br>
+++ lld/trunk/include/lld/Core/TaskGroup.h Fri May 5 16:09:26 2017<br>
@@ -0,0 +1,65 @@<br>
+//===- lld/Core/TaskGroup.h - Task Group ----------------------------------===//<br>
+//<br>
+// The LLVM Linker<br>
+//<br>
+// This file is distributed under the University of Illinois Open Source<br>
+// License. See LICENSE.TXT for details.<br>
+//<br>
+//===----------------------------------------------------------------------===//<br>
+<br>
+#ifndef LLD_CORE_TASKGROUP_H<br>
+#define LLD_CORE_TASKGROUP_H<br>
+<br>
+#include "lld/Core/LLVM.h"<br>
+<br>
+#include <condition_variable><br>
+#include <functional><br>
+#include <mutex><br>
+<br>
+namespace lld {<br>
+/// \brief Allows one or more threads to wait on a potentially unknown number of<br>
+/// events.<br>
+///<br>
+/// A latch starts at \p count. inc() increments this, and dec() decrements it.<br>
+/// All calls to sync() will block while the count is not 0.<br>
+///<br>
+/// Calling dec() on a Latch with a count of 0 has undefined behaivor.<br>
+class Latch {<br>
+ uint32_t _count;<br>
+ mutable std::mutex _condMut;<br>
+ mutable std::condition_variable _cond;<br>
+<br>
+public:<br>
+ explicit Latch(uint32_t count = 0) : _count(count) {}<br>
+ ~Latch() { sync(); }<br>
+<br>
+ void inc() {<br>
+ std::unique_lock<std::mutex> lock(_condMut);<br>
+ ++_count;<br>
+ }<br>
+<br>
+ void dec() {<br>
+ std::unique_lock<std::mutex> lock(_condMut);<br>
+ if (--_count == 0)<br>
+ _cond.notify_all();<br>
+ }<br>
+<br>
+ void sync() const {<br>
+ std::unique_lock<std::mutex> lock(_condMut);<br>
+ _cond.wait(lock, [&] { return _count == 0; });<br>
+ }<br>
+};<br>
+<br>
+/// \brief Allows launching a number of tasks and waiting for them to finish<br>
+/// either explicitly via sync() or implicitly on destruction.<br>
+class TaskGroup {<br>
+ Latch _latch;<br>
+<br>
+public:<br>
+ void spawn(std::function<void()> f);<br>
+<br>
+ void sync() const { _latch.sync(); }<br>
+};<br>
+}<br>
+<br>
+#endif<br>
<br>
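One note on the Latch documented in this new header: it is the entire synchronization mechanism behind TaskGroup. spawn() corresponds to inc(), each completed task calls dec(), and sync() blocks until the count drops back to zero. Here is a small sketch of that protocol, assuming only what the header above declares; the worker body and thread count are invented for illustration.

```cpp
// Illustrative only; mirrors how TaskGroup::spawn drives the Latch.
#include "lld/Core/TaskGroup.h"

#include <thread>

static void latchSketch() {
  lld::Latch Done; // count starts at 0

  for (int I = 0; I < 4; ++I) {
    Done.inc(); // register one outstanding event before the worker starts
    std::thread([&Done] {
      // ... some independent piece of work ...
      Done.dec(); // signal completion; the last dec() wakes sync()
    }).detach();
  }

  Done.sync(); // blocks until every worker has called dec()
}
```
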
Modified: lld/trunk/lib/Core/CMakeLists.txt<br>
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/lib/Core/CMakeLists.txt?rev=302288&r1=302287&r2=302288&view=diff
==============================================================================<br>
--- lld/trunk/lib/Core/CMakeLists.txt (original)<br>
+++ lld/trunk/lib/Core/CMakeLists.txt Fri May 5 16:09:26 2017<br>
@@ -12,6 +12,7 @@ add_lld_library(lldCore<br>
Resolver.cpp<br>
SymbolTable.cpp<br>
TargetOptionsCommandFlags.cpp<br>
+ TaskGroup.cpp<br>
Writer.cpp<br>
<br>
ADDITIONAL_HEADER_DIRS<br>
<br>
Added: lld/trunk/lib/Core/TaskGroup.cpp<br>
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/lib/Core/TaskGroup.cpp?rev=302288&view=auto
==============================================================================<br>
--- lld/trunk/lib/Core/TaskGroup.cpp (added)<br>
+++ lld/trunk/lib/Core/TaskGroup.cpp Fri May 5 16:09:26 2017<br>
@@ -0,0 +1,140 @@<br>
+//===- lld/Core/TaskGroup.cpp - Task Group --------------------------------===//<br>
+//<br>
+// The LLVM Linker<br>
+//<br>
+// This file is distributed under the University of Illinois Open Source<br>
+// License. See LICENSE.TXT for details.<br>
+//<br>
+//===----------------------------------------------------------------------===//<br>
+<br>
+#include "lld/Core/TaskGroup.h"<br>
+#include "llvm/config/llvm-config.h"<br>
+<br>
+#include <atomic><br>
+#include <stack><br>
+<br>
+#if defined(_MSC_VER) && LLVM_ENABLE_THREADS<br>
+#include <concrt.h><br>
+#include <ppl.h><br>
+#endif<br>
+<br>
+using namespace lld;<br>
+<br>
+namespace {<br>
+<br>
+/// \brief An abstract class that takes closures and runs them asynchronously.<br>
+class Executor {<br>
+public:<br>
+ virtual ~Executor() = default;<br>
+ virtual void add(std::function<void()> func) = 0;<br>
+<br>
+ static Executor *getDefaultExecutor();<br>
+};<br>
+<br>
+#if !LLVM_ENABLE_THREADS<br>
+class SyncExecutor : public Executor {<br>
+public:<br>
+ virtual void add(std::function<void()> F) { F(); }<br>
+};<br>
+<br>
+Executor *Executor::getDefaultExecutor() {<br>
+ static SyncExecutor Exec;<br>
+ return &Exec;<br>
+}<br>
+<br>
+#elif defined(_MSC_VER)<br>
+/// \brief An Executor that runs tasks via ConcRT.<br>
+class ConcRTExecutor : public Executor {<br>
+ struct Taskish {<br>
+ Taskish(std::function<void()> Task) : Task(Task) {}<br>
+<br>
+ std::function<void()> Task;<br>
+<br>
+ static void run(void *P) {<br>
+ Taskish *Self = static_cast<Taskish *>(P);<br>
+ Self->Task();<br>
+ concurrency::Free(Self);<br>
+ }<br>
+ };<br>
+<br>
+public:<br>
+ virtual void add(std::function<void()> F) {<br>
+ Concurrency::CurrentScheduler::ScheduleTask(<br>
+ Taskish::run, new (concurrency::Alloc(sizeof(Taskish))) Taskish(F));<br>
+ }<br>
+};<br>
+<br>
+Executor *Executor::getDefaultExecutor() {<br>
+ static ConcRTExecutor exec;<br>
+ return &exec;<br>
+}<br>
+<br>
+#else<br>
+/// \brief An implementation of an Executor that runs closures on a thread pool<br>
+/// in filo order.<br>
+class ThreadPoolExecutor : public Executor {<br>
+public:<br>
+ explicit ThreadPoolExecutor(<br>
+ unsigned ThreadCount = std::thread::hardware_concurrency())<br>
+ : Done(ThreadCount) {<br>
+ // Spawn all but one of the threads in another thread as spawning threads<br>
+ // can take a while.<br>
+ std::thread([&, ThreadCount] {<br>
+ for (size_t i = 1; i < ThreadCount; ++i) {<br>
+ std::thread([=] { work(); }).detach();<br>
+ }<br>
+ work();<br>
+ }).detach();<br>
+ }<br>
+<br>
+ ~ThreadPoolExecutor() override {<br>
+ std::unique_lock<std::mutex> Lock(Mutex);<br>
+ Stop = true;<br>
+ Lock.unlock();<br>
+ Cond.notify_all();<br>
+ // Wait for ~Latch.<br>
+ }<br>
+<br>
+ void add(std::function<void()> F) override {<br>
+ std::unique_lock<std::mutex> Lock(Mutex);<br>
+ WorkStack.push(F);<br>
+ Lock.unlock();<br>
+ Cond.notify_one();<br>
+ }<br>
+<br>
+private:<br>
+ void work() {<br>
+ while (true) {<br>
+ std::unique_lock<std::mutex> Lock(Mutex);<br>
+ Cond.wait(Lock, [&] { return Stop || !WorkStack.empty(); });<br>
+ if (Stop)<br>
+ break;<br>
+ auto Task = WorkStack.top();<br>
+ WorkStack.pop();<br>
+ Lock.unlock();<br>
+ Task();<br>
+ }<br>
+ Done.dec();<br>
+ }<br>
+<br>
+ std::atomic<bool> Stop{false};<br>
+ std::stack<std::function<void()>> WorkStack;<br>
+ std::mutex Mutex;<br>
+ std::condition_variable Cond;<br>
+ Latch Done;<br>
+};<br>
+<br>
+Executor *Executor::getDefaultExecutor() {<br>
+ static ThreadPoolExecutor exec;<br>
+ return &exec;<br>
+}<br>
+#endif<br>
+}<br>
+<br>
+void TaskGroup::spawn(std::function<void()> f) {<br>
+ _latch.inc();<br>
+ Executor::getDefaultExecutor()->add([&, f] {<br>
+ f();<br>
+ _latch.dec();<br>
+ });<br>
+}<br>
<br>
Modified: lld/trunk/unittests/CoreTests/CMakeLists.txt<br>
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/unittests/CoreTests/CMakeLists.txt?rev=302288&r1=302287&r2=302288&view=diff
==============================================================================<br>
--- lld/trunk/unittests/CoreTests/CMakeLists.txt (original)<br>
+++ lld/trunk/unittests/CoreTests/CMakeLists.txt Fri May 5 16:09:26 2017<br>
@@ -3,5 +3,5 @@ add_lld_unittest(CoreTests<br>
)<br>
<br>
target_link_libraries(CoreTests<br>
- ${LLVM_PTHREAD_LIB}<br>
+ lldCore ${LLVM_PTHREAD_LIB}<br>
)<br>
<br>
<br>