[lld] r288002 - Add parallel_for and use it where appropriate.
Rui Ueyama via llvm-commits
llvm-commits at lists.llvm.org
Sun Nov 27 11:28:33 PST 2016
Author: ruiu
Date: Sun Nov 27 13:28:32 2016
New Revision: 288002
URL: http://llvm.org/viewvc/llvm-project?rev=288002&view=rev
Log:
Add parallel_for and use it where appropriate.
When we iterate over numbers as opposed to iterable elements,
parallel_for fits better than parallel_for_each.
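(Illustration only, not part of this commit.) Roughly, the difference in call style looks like the sketch below, assuming the header is included as lld/Core/Parallel.h; doubleAll and scatterByIndex are made-up helper functions:

#include "lld/Core/Parallel.h"

#include <cstddef>
#include <vector>

// parallel_for_each iterates over elements; the callback takes a reference.
static void doubleAll(std::vector<int> &V) {
  lld::parallel_for_each(V.begin(), V.end(), [](int &X) { X *= 2; });
}

// parallel_for iterates over indices, which is the natural fit when the
// output slot is derived from the element's position (as in computeHash
// below). Out must already have at least In.size() elements.
static void scatterByIndex(const std::vector<int> &In, std::vector<int> &Out) {
  lld::parallel_for(size_t(0), In.size(), [&](size_t I) { Out[I] = In[I] * 2; });
}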
Modified:
lld/trunk/ELF/SyntheticSections.cpp
lld/trunk/include/lld/Core/Parallel.h
Modified: lld/trunk/ELF/SyntheticSections.cpp
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/ELF/SyntheticSections.cpp?rev=288002&r1=288001&r2=288002&view=diff
==============================================================================
--- lld/trunk/ELF/SyntheticSections.cpp (original)
+++ lld/trunk/ELF/SyntheticSections.cpp Sun Nov 27 13:28:32 2016
@@ -334,16 +334,14 @@ void BuildIdSection<ELFT>::computeHash(
std::vector<ArrayRef<uint8_t>> Chunks = split(Data, 1024 * 1024);
std::vector<uint8_t> Hashes(Chunks.size() * HashSize);
- auto Fn = [&](ArrayRef<uint8_t> &Chunk) {
- size_t Idx = &Chunk - Chunks.data();
- HashFn(Hashes.data() + Idx * HashSize, Chunk);
- };
+ auto Fn = [&](size_t I) { HashFn(Hashes.data() + I * HashSize, Chunks[I]); };
// Compute hash values.
if (Config->Threads)
- parallel_for_each(Chunks.begin(), Chunks.end(), Fn);
+ parallel_for(size_t(0), Chunks.size(), Fn);
else
- std::for_each(Chunks.begin(), Chunks.end(), Fn);
+ for (size_t I = 0, E = Chunks.size(); I != E; ++I)
+ Fn(I);
// Write to the final output buffer.
HashFn(HashBuf, Hashes);
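For readers skimming the hunk above: each index I owns a disjoint 1 MiB input chunk and a disjoint HashSize-byte output slot, so the lambda is safe to run from parallel_for. A standalone, sequential sketch of that pattern (hashChunk and the 8-byte HashSize here are placeholders, not lld's HashFn):

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

constexpr size_t HashSize = 8; // stand-in for the real digest size

// Placeholder digest: XOR-fold the chunk into 8 bytes. The real code calls
// the target-specific HashFn instead.
static void hashChunk(uint8_t *Dest, const uint8_t *Data, size_t Size) {
  uint64_t H = 0;
  for (size_t I = 0; I < Size; ++I)
    H ^= uint64_t(Data[I]) << ((I % 8) * 8);
  std::memcpy(Dest, &H, HashSize);
}

static std::vector<uint8_t> hashInChunks(const std::vector<uint8_t> &Data) {
  const size_t ChunkSize = 1024 * 1024;
  size_t NumChunks = (Data.size() + ChunkSize - 1) / ChunkSize;
  std::vector<uint8_t> Hashes(NumChunks * HashSize);
  // This loop body is what the patch hands to parallel_for: index I reads
  // Data[I * ChunkSize, ...) and writes Hashes[I * HashSize, ...).
  for (size_t I = 0; I != NumChunks; ++I) {
    size_t Begin = I * ChunkSize;
    size_t Size = std::min(ChunkSize, Data.size() - Begin);
    hashChunk(Hashes.data() + I * HashSize, Data.data() + Begin, Size);
  }
  return Hashes;
}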
Modified: lld/trunk/include/lld/Core/Parallel.h
URL: http://llvm.org/viewvc/llvm-project/lld/trunk/include/lld/Core/Parallel.h?rev=288002&r1=288001&r2=288002&view=diff
==============================================================================
--- lld/trunk/include/lld/Core/Parallel.h (original)
+++ lld/trunk/include/lld/Core/Parallel.h Sun Nov 27 13:28:32 2016
@@ -270,33 +270,63 @@ template <class T> void parallel_sort(T
}
#if !defined(LLVM_ENABLE_THREADS) || LLVM_ENABLE_THREADS == 0
-template <class Iterator, class Func>
-void parallel_for_each(Iterator begin, Iterator end, Func func) {
- std::for_each(begin, end, func);
+template <class IterTy, class FuncTy>
+void parallel_for_each(IterTy Begin, IterTy End, FuncTy Fn) {
+ std::for_each(Begin, End, Fn);
+}
+
+template <class IndexTy, class FuncTy>
+void parallel_for(IndexTy Begin, IndexTy End, FuncTy Fn) {
+ for (IndexTy I = Begin; I != End; ++I)
+ Fn(I);
}
#elif defined(_MSC_VER)
// Use ppl parallel_for_each on Windows.
-template <class Iterator, class Func>
-void parallel_for_each(Iterator begin, Iterator end, Func func) {
- concurrency::parallel_for_each(begin, end, func);
+template <class IterTy, class FuncTy>
+void parallel_for_each(IterTy Begin, IterTy End, FuncTy Fn) {
+ concurrency::parallel_for_each(Begin, End, Fn);
+}
+
+template <class IndexTy, class FuncTy>
+void parallel_for(IndexTy Begin, IndexTy End, FuncTy Fn) {
+ concurrency::parallel_for(Begin, End, Fn);
}
#else
-template <class Iterator, class Func>
-void parallel_for_each(Iterator begin, Iterator end, Func func) {
+template <class IterTy, class FuncTy>
+void parallel_for_each(IterTy Begin, IterTy End, FuncTy Fn) {
// TaskGroup has a relatively high overhead, so we want to reduce
// the number of spawn() calls. We'll create up to 1024 tasks here.
// (Note that 1024 is an arbitrary number. This code probably needs
// improving to take the number of available cores into account.)
- ptrdiff_t taskSize = std::distance(begin, end) / 1024;
- if (taskSize == 0)
- taskSize = 1;
-
- TaskGroup tg;
- while (taskSize <= std::distance(begin, end)) {
- tg.spawn([=, &func] { std::for_each(begin, begin + taskSize, func); });
- begin += taskSize;
+ ptrdiff_t TaskSize = std::distance(Begin, End) / 1024;
+ if (TaskSize == 0)
+ TaskSize = 1;
+
+ TaskGroup Tg;
+ while (TaskSize <= std::distance(Begin, End)) {
+ Tg.spawn([=, &Fn] { std::for_each(Begin, Begin + TaskSize, Fn); });
+ Begin += TaskSize;
+ }
+ std::for_each(Begin, End, Fn);
+}
+
+template <class IndexTy, class FuncTy>
+void parallel_for(IndexTy Begin, IndexTy End, FuncTy Fn) {
+ ptrdiff_t TaskSize = (End - Begin) / 1024;
+ if (TaskSize == 0)
+ TaskSize = 1;
+
+ TaskGroup Tg;
+ IndexTy I = Begin;
+ for (; I + TaskSize < End; I += TaskSize) {
+ Tg.spawn([=, &Fn] {
+ for (IndexTy J = I, E = I + TaskSize; J != E; ++J)
+ Fn(J);
+ });
}
- std::for_each(begin, end, func);
+ for (; I < End; ++I)
+ Fn(I);
}
#endif
} // end namespace lld
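Usage note (a hedged sketch, not from the patch): callers that honor --threads can keep a serial fallback next to the parallel call, mirroring the computeHash change above. Threads below is a stand-in for Config->Threads in the real code:

#include "lld/Core/Parallel.h"

#include <cstddef>
#include <vector>

static void incrementAll(std::vector<int> &V, bool Threads) {
  auto Fn = [&](size_t I) { V[I] += 1; };
  if (Threads)
    lld::parallel_for(size_t(0), V.size(), Fn);
  else
    for (size_t I = 0, E = V.size(); I != E; ++I)
      Fn(I);
}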