[Lldb-commits] [clang] [clang-tools-extra] [lld] [lldb] [llvm] [mlir] Rename llvm::ThreadPool -> llvm::DefaultThreadPool (NFC) (PR #83702)
Mehdi Amini via lldb-commits
lldb-commits at lists.llvm.org
Tue Mar 5 17:41:24 PST 2024
https://github.com/joker-eph updated https://github.com/llvm/llvm-project/pull/83702
From d1dc1dfb1bb601fe90289bf29176c74a38ac5697 Mon Sep 17 00:00:00 2001
From: Mehdi Amini <joker.eph at gmail.com>
Date: Tue, 5 Mar 2024 10:38:41 -0800
Subject: [PATCH] Rename llvm::ThreadPool -> llvm::DefaultThreadPool (NFC)
The base class llvm::ThreadPoolInterface will be renamed to llvm::ThreadPool
in a subsequent commit.
---
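For reviewers, a minimal sketch of what a call site reads like once this rename is applied; llvm::DefaultThreadPool, llvm::hardware_concurrency(), async(), and wait() are the existing llvm/Support APIs touched here, while the function name and task bodies below are illustrative only:

    #include "llvm/Support/ThreadPool.h"
    #include "llvm/Support/Threading.h"

    // Illustrative helper, not part of this patch.
    void runTasksInParallel() {
      // DefaultThreadPool is StdThreadPool when LLVM_ENABLE_THREADS is on,
      // and SingleThreadExecutor otherwise (see the ThreadPool.h hunk below).
      llvm::DefaultThreadPool Pool(llvm::hardware_concurrency(4));
      for (int I = 0; I < 8; ++I)
        Pool.async([I] { /* per-task work goes here */ });
      Pool.wait(); // block until every queued task has finished
    }
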
bolt/lib/Core/ParallelUtilities.cpp | 4 ++--
bolt/tools/merge-fdata/merge-fdata.cpp | 2 +-
.../clang-doc/tool/ClangDocMain.cpp | 2 +-
.../tool/FindAllSymbolsMain.cpp | 2 +-
clang/lib/Tooling/AllTUsExecution.cpp | 2 +-
clang/tools/clang-scan-deps/ClangScanDeps.cpp | 2 +-
lld/MachO/Writer.cpp | 2 +-
lldb/source/Core/Debugger.cpp | 4 ++--
llvm/docs/ORCv2.rst | 2 +-
.../SpeculativeJIT/SpeculativeJIT.cpp | 2 +-
llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h | 2 +-
llvm/include/llvm/Support/ThreadPool.h | 7 +++---
llvm/lib/CodeGen/ParallelCG.cpp | 2 +-
llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp | 2 +-
.../DWARFLinker/Parallel/DWARFLinkerImpl.cpp | 2 +-
llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp | 2 +-
llvm/lib/ExecutionEngine/Orc/LLJIT.cpp | 4 ++--
llvm/lib/LTO/LTO.cpp | 2 +-
llvm/lib/LTO/LTOBackend.cpp | 2 +-
llvm/lib/LTO/ThinLTOCodeGenerator.cpp | 4 ++--
llvm/lib/Support/BalancedPartitioning.cpp | 2 +-
llvm/tools/dsymutil/dsymutil.cpp | 2 +-
llvm/tools/llvm-cov/CodeCoverage.cpp | 2 +-
llvm/tools/llvm-cov/CoverageExporterJson.cpp | 2 +-
llvm/tools/llvm-cov/CoverageReport.cpp | 4 ++--
.../tools/llvm-debuginfod/llvm-debuginfod.cpp | 2 +-
llvm/tools/llvm-profdata/llvm-profdata.cpp | 2 +-
llvm/tools/llvm-reduce/deltas/Delta.cpp | 2 +-
llvm/unittests/ADT/LazyAtomicPointerTest.cpp | 4 ++--
llvm/unittests/Debuginfod/HTTPServerTests.cpp | 16 +++++++-------
llvm/unittests/Support/ParallelTest.cpp | 2 +-
llvm/unittests/Support/ThreadPool.cpp | 22 +++++++++----------
.../Support/ThreadSafeAllocatorTest.cpp | 6 ++---
mlir/include/mlir/IR/MLIRContext.h | 2 +-
mlir/lib/CAPI/IR/Support.cpp | 2 +-
mlir/lib/ExecutionEngine/AsyncRuntime.cpp | 2 +-
mlir/lib/IR/MLIRContext.cpp | 4 ++--
37 files changed, 65 insertions(+), 66 deletions(-)
diff --git a/bolt/lib/Core/ParallelUtilities.cpp b/bolt/lib/Core/ParallelUtilities.cpp
index 88d9444a6a2ba7..5f5e96e0e7881c 100644
--- a/bolt/lib/Core/ParallelUtilities.cpp
+++ b/bolt/lib/Core/ParallelUtilities.cpp
@@ -49,7 +49,7 @@ namespace ParallelUtilities {
namespace {
/// A single thread pool that is used to run parallel tasks
-std::unique_ptr<ThreadPool> ThreadPoolPtr;
+std::unique_ptr<DefaultThreadPool> ThreadPoolPtr;
unsigned computeCostFor(const BinaryFunction &BF,
const PredicateTy &SkipPredicate,
@@ -106,7 +106,7 @@ ThreadPoolInterface &getThreadPool() {
if (ThreadPoolPtr.get())
return *ThreadPoolPtr;
- ThreadPoolPtr = std::make_unique<ThreadPool>(
+ ThreadPoolPtr = std::make_unique<DefaultThreadPool>(
llvm::hardware_concurrency(opts::ThreadCount));
return *ThreadPoolPtr;
}
diff --git a/bolt/tools/merge-fdata/merge-fdata.cpp b/bolt/tools/merge-fdata/merge-fdata.cpp
index c6dfd3cfdc56de..f2ac5ad4492ee5 100644
--- a/bolt/tools/merge-fdata/merge-fdata.cpp
+++ b/bolt/tools/merge-fdata/merge-fdata.cpp
@@ -316,7 +316,7 @@ void mergeLegacyProfiles(const SmallVectorImpl<std::string> &Filenames) {
// least 4 tasks.
ThreadPoolStrategy S = optimal_concurrency(
std::max(Filenames.size() / 4, static_cast<size_t>(1)));
- ThreadPool Pool(S);
+ DefaultThreadPool Pool(S);
DenseMap<llvm::thread::id, ProfileTy> ParsedProfiles(
Pool.getMaxConcurrency());
for (const auto &Filename : Filenames)
diff --git a/clang-tools-extra/clang-doc/tool/ClangDocMain.cpp b/clang-tools-extra/clang-doc/tool/ClangDocMain.cpp
index 22bdb5de22d871..21b581fa6df2e1 100644
--- a/clang-tools-extra/clang-doc/tool/ClangDocMain.cpp
+++ b/clang-tools-extra/clang-doc/tool/ClangDocMain.cpp
@@ -238,7 +238,7 @@ Example usage for a project using a compile commands database:
Error = false;
llvm::sys::Mutex IndexMutex;
// ExecutorConcurrency is a flag exposed by AllTUsExecution.h
- llvm::ThreadPool Pool(llvm::hardware_concurrency(ExecutorConcurrency));
+ llvm::DefaultThreadPool Pool(llvm::hardware_concurrency(ExecutorConcurrency));
for (auto &Group : USRToBitcode) {
Pool.async([&]() {
std::vector<std::unique_ptr<doc::Info>> Infos;
diff --git a/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/FindAllSymbolsMain.cpp b/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/FindAllSymbolsMain.cpp
index b2d0efecc20692..298b02e77cb0aa 100644
--- a/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/FindAllSymbolsMain.cpp
+++ b/clang-tools-extra/clang-include-fixer/find-all-symbols/tool/FindAllSymbolsMain.cpp
@@ -89,7 +89,7 @@ bool Merge(llvm::StringRef MergeDir, llvm::StringRef OutputFile) {
// Load all symbol files in MergeDir.
{
- llvm::ThreadPool Pool;
+ llvm::DefaultThreadPool Pool;
for (llvm::sys::fs::directory_iterator Dir(MergeDir, EC), DirEnd;
Dir != DirEnd && !EC; Dir.increment(EC)) {
// Parse YAML files in parallel.
diff --git a/clang/lib/Tooling/AllTUsExecution.cpp b/clang/lib/Tooling/AllTUsExecution.cpp
index f327d013994141..9cad8680447be9 100644
--- a/clang/lib/Tooling/AllTUsExecution.cpp
+++ b/clang/lib/Tooling/AllTUsExecution.cpp
@@ -115,7 +115,7 @@ llvm::Error AllTUsToolExecutor::execute(
auto &Action = Actions.front();
{
- llvm::ThreadPool Pool(llvm::hardware_concurrency(ThreadCount));
+ llvm::DefaultThreadPool Pool(llvm::hardware_concurrency(ThreadCount));
for (std::string File : Files) {
Pool.async(
[&](std::string Path) {
diff --git a/clang/tools/clang-scan-deps/ClangScanDeps.cpp b/clang/tools/clang-scan-deps/ClangScanDeps.cpp
index 9811d2a8753359..d042fecc3dbe63 100644
--- a/clang/tools/clang-scan-deps/ClangScanDeps.cpp
+++ b/clang/tools/clang-scan-deps/ClangScanDeps.cpp
@@ -869,7 +869,7 @@ int clang_scan_deps_main(int argc, char **argv, const llvm::ToolContext &) {
DependencyScanningService Service(ScanMode, Format, OptimizeArgs,
EagerLoadModules);
- llvm::ThreadPool Pool(llvm::hardware_concurrency(NumThreads));
+ llvm::DefaultThreadPool Pool(llvm::hardware_concurrency(NumThreads));
std::vector<std::unique_ptr<DependencyScanningTool>> WorkerTools;
for (unsigned I = 0; I < Pool.getMaxConcurrency(); ++I)
WorkerTools.push_back(std::make_unique<DependencyScanningTool>(Service));
diff --git a/lld/MachO/Writer.cpp b/lld/MachO/Writer.cpp
index 65b598d1d7c422..9b0a32c136e8b1 100644
--- a/lld/MachO/Writer.cpp
+++ b/lld/MachO/Writer.cpp
@@ -66,7 +66,7 @@ class Writer {
template <class LP> void run();
- ThreadPool threadPool;
+ DefaultThreadPool threadPool;
std::unique_ptr<FileOutputBuffer> &buffer;
uint64_t addr = 0;
uint64_t fileOff = 0;
diff --git a/lldb/source/Core/Debugger.cpp b/lldb/source/Core/Debugger.cpp
index 1b25527abf981f..9d62b2a908f770 100644
--- a/lldb/source/Core/Debugger.cpp
+++ b/lldb/source/Core/Debugger.cpp
@@ -104,7 +104,7 @@ static std::recursive_mutex *g_debugger_list_mutex_ptr =
nullptr; // NOTE: intentional leak to avoid issues with C++ destructor chain
static Debugger::DebuggerList *g_debugger_list_ptr =
nullptr; // NOTE: intentional leak to avoid issues with C++ destructor chain
-static llvm::ThreadPool *g_thread_pool = nullptr;
+static llvm::DefaultThreadPool *g_thread_pool = nullptr;
static constexpr OptionEnumValueElement g_show_disassembly_enum_values[] = {
{
@@ -609,7 +609,7 @@ void Debugger::Initialize(LoadPluginCallbackType load_plugin_callback) {
"Debugger::Initialize called more than once!");
g_debugger_list_mutex_ptr = new std::recursive_mutex();
g_debugger_list_ptr = new DebuggerList();
- g_thread_pool = new llvm::ThreadPool(llvm::optimal_concurrency());
+ g_thread_pool = new llvm::DefaultThreadPool(llvm::optimal_concurrency());
g_load_plugin_callback = load_plugin_callback;
}
diff --git a/llvm/docs/ORCv2.rst b/llvm/docs/ORCv2.rst
index add05e05a80e5f..910ef5b9f3d02f 100644
--- a/llvm/docs/ORCv2.rst
+++ b/llvm/docs/ORCv2.rst
@@ -738,7 +738,7 @@ or creating any Modules attached to it. E.g.
ThreadSafeContext TSCtx(std::make_unique<LLVMContext>());
- ThreadPool TP(NumThreads);
+ DefaultThreadPool TP(NumThreads);
JITStack J;
for (auto &ModulePath : ModulePaths) {
diff --git a/llvm/examples/SpeculativeJIT/SpeculativeJIT.cpp b/llvm/examples/SpeculativeJIT/SpeculativeJIT.cpp
index fdd376d82da5d8..0d97d379d2279e 100644
--- a/llvm/examples/SpeculativeJIT/SpeculativeJIT.cpp
+++ b/llvm/examples/SpeculativeJIT/SpeculativeJIT.cpp
@@ -136,7 +136,7 @@ class SpeculativeJIT {
std::unique_ptr<ExecutionSession> ES;
DataLayout DL;
MangleAndInterner Mangle{*ES, DL};
- ThreadPool CompileThreads{llvm::hardware_concurrency(NumThreads)};
+ DefaultThreadPool CompileThreads{llvm::hardware_concurrency(NumThreads)};
JITDylib &MainJD;
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h b/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h
index 923976b182d1e5..76d16e63df2815 100644
--- a/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/LLJIT.h
@@ -254,7 +254,7 @@ class LLJIT {
DataLayout DL;
Triple TT;
- std::unique_ptr<ThreadPool> CompileThreads;
+ std::unique_ptr<DefaultThreadPool> CompileThreads;
std::unique_ptr<ObjectLayer> ObjLinkingLayer;
std::unique_ptr<ObjectTransformLayer> ObjTransformLayer;
diff --git a/llvm/include/llvm/Support/ThreadPool.h b/llvm/include/llvm/Support/ThreadPool.h
index 93f02729f047aa..014b7a09d7c8ba 100644
--- a/llvm/include/llvm/Support/ThreadPool.h
+++ b/llvm/include/llvm/Support/ThreadPool.h
@@ -212,8 +212,7 @@ class StdThreadPool : public ThreadPoolInterface {
/// Maximum number of threads to potentially grow this pool to.
const unsigned MaxThreadCount;
};
-
-#endif // LLVM_ENABLE_THREADS Disabled
+#endif // LLVM_ENABLE_THREADS
/// A non-threaded implementation.
class SingleThreadExecutor : public ThreadPoolInterface {
@@ -253,9 +252,9 @@ class SingleThreadExecutor : public ThreadPoolInterface {
};
#if LLVM_ENABLE_THREADS
-using ThreadPool = StdThreadPool;
+using DefaultThreadPool = StdThreadPool;
#else
-using ThreadPool = SingleThreadExecutor;
+using DefaultThreadPool = SingleThreadExecutor;
#endif
/// A group of tasks to be run on a thread pool. Thread pool tasks in different
diff --git a/llvm/lib/CodeGen/ParallelCG.cpp b/llvm/lib/CodeGen/ParallelCG.cpp
index 43b23368ead270..ceb64b2badab56 100644
--- a/llvm/lib/CodeGen/ParallelCG.cpp
+++ b/llvm/lib/CodeGen/ParallelCG.cpp
@@ -52,7 +52,7 @@ void llvm::splitCodeGen(
// Create ThreadPool in nested scope so that threads will be joined
// on destruction.
{
- ThreadPool CodegenThreadPool(hardware_concurrency(OSs.size()));
+ DefaultThreadPool CodegenThreadPool(hardware_concurrency(OSs.size()));
int ThreadCount = 0;
SplitModule(
diff --git a/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp b/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp
index 4f5a4e2ffc702a..9b581a6c9ab774 100644
--- a/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp
+++ b/llvm/lib/DWARFLinker/Classic/DWARFLinker.cpp
@@ -2935,7 +2935,7 @@ Error DWARFLinker::link() {
}
EmitLambda();
} else {
- ThreadPool Pool(hardware_concurrency(2));
+ DefaultThreadPool Pool(hardware_concurrency(2));
Pool.async(AnalyzeAll);
Pool.async(CloneAll);
Pool.wait();
diff --git a/llvm/lib/DWARFLinker/Parallel/DWARFLinkerImpl.cpp b/llvm/lib/DWARFLinker/Parallel/DWARFLinkerImpl.cpp
index a052969e74c0f8..49b08997eb9c1c 100644
--- a/llvm/lib/DWARFLinker/Parallel/DWARFLinkerImpl.cpp
+++ b/llvm/lib/DWARFLinker/Parallel/DWARFLinkerImpl.cpp
@@ -192,7 +192,7 @@ Error DWARFLinkerImpl::link() {
Context->InputDWARFFile.unload();
}
} else {
- ThreadPool Pool(llvm::parallel::strategy);
+ DefaultThreadPool Pool(llvm::parallel::strategy);
for (std::unique_ptr<LinkContext> &Context : ObjectContexts)
Pool.async([&]() {
// Link object file.
diff --git a/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp b/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
index 3a28cd412de929..ff6b560d11726b 100644
--- a/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
+++ b/llvm/lib/DebugInfo/GSYM/DwarfTransformer.cpp
@@ -601,7 +601,7 @@ Error DwarfTransformer::convert(uint32_t NumThreads, OutputAggregator &Out) {
// Now parse all DIEs in case we have cross compile unit references in a
// thread pool.
- ThreadPool pool(hardware_concurrency(NumThreads));
+ DefaultThreadPool pool(hardware_concurrency(NumThreads));
for (const auto &CU : DICtx.compile_units())
pool.async([&CU]() { CU->getUnitDIE(false /*CUDieOnly*/); });
pool.wait();
diff --git a/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp b/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp
index 833dcb9d5bf2e7..79adda5b7bc034 100644
--- a/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp
+++ b/llvm/lib/ExecutionEngine/Orc/LLJIT.cpp
@@ -972,8 +972,8 @@ LLJIT::LLJIT(LLJITBuilderState &S, Error &Err)
if (S.NumCompileThreads > 0) {
InitHelperTransformLayer->setCloneToNewContextOnEmit(true);
- CompileThreads =
- std::make_unique<ThreadPool>(hardware_concurrency(S.NumCompileThreads));
+ CompileThreads = std::make_unique<DefaultThreadPool>(
+ hardware_concurrency(S.NumCompileThreads));
ES->setDispatchTask([this](std::unique_ptr<Task> T) {
// FIXME: We should be able to use move-capture here, but ThreadPool's
// AsyncTaskTys are std::functions rather than unique_functions
diff --git a/llvm/lib/LTO/LTO.cpp b/llvm/lib/LTO/LTO.cpp
index 34a49c8588b2f7..9c93ec70da7764 100644
--- a/llvm/lib/LTO/LTO.cpp
+++ b/llvm/lib/LTO/LTO.cpp
@@ -1409,7 +1409,7 @@ class lto::ThinBackendProc {
namespace {
class InProcessThinBackend : public ThinBackendProc {
- ThreadPool BackendThreadPool;
+ DefaultThreadPool BackendThreadPool;
AddStreamFn AddStream;
FileCache Cache;
std::set<GlobalValue::GUID> CfiFunctionDefs;
diff --git a/llvm/lib/LTO/LTOBackend.cpp b/llvm/lib/LTO/LTOBackend.cpp
index 6cfe67779b1a7d..71e8849dc3cc91 100644
--- a/llvm/lib/LTO/LTOBackend.cpp
+++ b/llvm/lib/LTO/LTOBackend.cpp
@@ -431,7 +431,7 @@ static void splitCodeGen(const Config &C, TargetMachine *TM,
AddStreamFn AddStream,
unsigned ParallelCodeGenParallelismLevel, Module &Mod,
const ModuleSummaryIndex &CombinedIndex) {
- ThreadPool CodegenThreadPool(
+ DefaultThreadPool CodegenThreadPool(
heavyweight_hardware_concurrency(ParallelCodeGenParallelismLevel));
unsigned ThreadCount = 0;
const Target *T = &TM->getTarget();
diff --git a/llvm/lib/LTO/ThinLTOCodeGenerator.cpp b/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
index 8fd181846f0c4c..8f517eb50dc76f 100644
--- a/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
+++ b/llvm/lib/LTO/ThinLTOCodeGenerator.cpp
@@ -980,7 +980,7 @@ void ThinLTOCodeGenerator::run() {
if (CodeGenOnly) {
// Perform only parallel codegen and return.
- ThreadPool Pool;
+ DefaultThreadPool Pool;
int count = 0;
for (auto &Mod : Modules) {
Pool.async([&](int count) {
@@ -1126,7 +1126,7 @@ void ThinLTOCodeGenerator::run() {
// Parallel optimizer + codegen
{
- ThreadPool Pool(heavyweight_hardware_concurrency(ThreadCount));
+ DefaultThreadPool Pool(heavyweight_hardware_concurrency(ThreadCount));
for (auto IndexCount : ModulesOrdering) {
auto &Mod = Modules[IndexCount];
Pool.async([&](int count) {
diff --git a/llvm/lib/Support/BalancedPartitioning.cpp b/llvm/lib/Support/BalancedPartitioning.cpp
index cb6ba61179941f..f4254b50d26c91 100644
--- a/llvm/lib/Support/BalancedPartitioning.cpp
+++ b/llvm/lib/Support/BalancedPartitioning.cpp
@@ -82,7 +82,7 @@ void BalancedPartitioning::run(std::vector<BPFunctionNode> &Nodes) const {
Nodes.size(), Config.SplitDepth, Config.IterationsPerSplit));
std::optional<BPThreadPool> TP;
#if LLVM_ENABLE_THREADS
- ThreadPool TheThreadPool;
+ DefaultThreadPool TheThreadPool;
if (Config.TaskSplitDepth > 1)
TP.emplace(TheThreadPool);
#endif
diff --git a/llvm/tools/dsymutil/dsymutil.cpp b/llvm/tools/dsymutil/dsymutil.cpp
index b0e988c6f8e4b8..25e281c415e75a 100644
--- a/llvm/tools/dsymutil/dsymutil.cpp
+++ b/llvm/tools/dsymutil/dsymutil.cpp
@@ -734,7 +734,7 @@ int dsymutil_main(int argc, char **argv, const llvm::ToolContext &) {
S.ThreadsRequested = DebugMapPtrsOrErr->size();
S.Limit = true;
}
- ThreadPool Threads(S);
+ DefaultThreadPool Threads(S);
// If there is more than one link to execute, we need to generate
// temporary files.
diff --git a/llvm/tools/llvm-cov/CodeCoverage.cpp b/llvm/tools/llvm-cov/CodeCoverage.cpp
index 049e89d1a23003..1e5bfbe5c3aade 100644
--- a/llvm/tools/llvm-cov/CodeCoverage.cpp
+++ b/llvm/tools/llvm-cov/CodeCoverage.cpp
@@ -1217,7 +1217,7 @@ int CodeCoverageTool::doShow(int argc, const char **argv,
ShowFilenames);
} else {
// In -output-dir mode, it's safe to use multiple threads to print files.
- ThreadPool Pool(S);
+ DefaultThreadPool Pool(S);
for (const std::string &SourceFile : SourceFiles)
Pool.async(&CodeCoverageTool::writeSourceFileView, this, SourceFile,
Coverage.get(), Printer.get(), ShowFilenames);
diff --git a/llvm/tools/llvm-cov/CoverageExporterJson.cpp b/llvm/tools/llvm-cov/CoverageExporterJson.cpp
index a424bbe06e0ecd..9a8c7c94f06124 100644
--- a/llvm/tools/llvm-cov/CoverageExporterJson.cpp
+++ b/llvm/tools/llvm-cov/CoverageExporterJson.cpp
@@ -277,7 +277,7 @@ json::Array renderFiles(const coverage::CoverageMapping &Coverage,
S = heavyweight_hardware_concurrency(SourceFiles.size());
S.Limit = true;
}
- ThreadPool Pool(S);
+ DefaultThreadPool Pool(S);
json::Array FileArray;
std::mutex FileArrayMutex;
diff --git a/llvm/tools/llvm-cov/CoverageReport.cpp b/llvm/tools/llvm-cov/CoverageReport.cpp
index 8cc073e4def8fc..49a35f2a943e6f 100644
--- a/llvm/tools/llvm-cov/CoverageReport.cpp
+++ b/llvm/tools/llvm-cov/CoverageReport.cpp
@@ -465,7 +465,7 @@ std::vector<FileCoverageSummary> CoverageReport::prepareFileReports(
S = heavyweight_hardware_concurrency(Files.size());
S.Limit = true;
}
- ThreadPool Pool(S);
+ DefaultThreadPool Pool(S);
std::vector<FileCoverageSummary> FileReports;
FileReports.reserve(Files.size());
@@ -580,7 +580,7 @@ Expected<FileCoverageSummary> DirectoryCoverageReport::prepareDirectoryReports(
PoolS = heavyweight_hardware_concurrency(Files.size());
PoolS.Limit = true;
}
- ThreadPool Pool(PoolS);
+ DefaultThreadPool Pool(PoolS);
TPool = &Pool;
LCPStack = {RootLCP};
diff --git a/llvm/tools/llvm-debuginfod/llvm-debuginfod.cpp b/llvm/tools/llvm-debuginfod/llvm-debuginfod.cpp
index 9d347dbd68f395..44d656148a4e2c 100644
--- a/llvm/tools/llvm-debuginfod/llvm-debuginfod.cpp
+++ b/llvm/tools/llvm-debuginfod/llvm-debuginfod.cpp
@@ -127,7 +127,7 @@ int llvm_debuginfod_main(int argc, char **argv, const llvm::ToolContext &) {
for (const std::string &Path : ScanPaths)
Paths.push_back(Path);
- ThreadPool Pool(hardware_concurrency(MaxConcurrency));
+ DefaultThreadPool Pool(hardware_concurrency(MaxConcurrency));
DebuginfodLog Log;
DebuginfodCollection Collection(Paths, Log, Pool, MinInterval);
DebuginfodServer Server(Log, Collection);
diff --git a/llvm/tools/llvm-profdata/llvm-profdata.cpp b/llvm/tools/llvm-profdata/llvm-profdata.cpp
index 577a8825fcaa7d..8400b0769944cf 100644
--- a/llvm/tools/llvm-profdata/llvm-profdata.cpp
+++ b/llvm/tools/llvm-profdata/llvm-profdata.cpp
@@ -898,7 +898,7 @@ static void mergeInstrProfile(const WeightedFileVector &Inputs,
loadInput(Input, Remapper, Correlator.get(), ProfiledBinary,
Contexts[0].get());
} else {
- ThreadPool Pool(hardware_concurrency(NumThreads));
+ DefaultThreadPool Pool(hardware_concurrency(NumThreads));
// Load the inputs in parallel (N/NumThreads serial steps).
unsigned Ctx = 0;
diff --git a/llvm/tools/llvm-reduce/deltas/Delta.cpp b/llvm/tools/llvm-reduce/deltas/Delta.cpp
index 569117e70d6b42..4b84921618e1cd 100644
--- a/llvm/tools/llvm-reduce/deltas/Delta.cpp
+++ b/llvm/tools/llvm-reduce/deltas/Delta.cpp
@@ -222,7 +222,7 @@ void llvm::runDeltaPass(TestRunner &Test, ReductionFunc ExtractChunksFromModule,
std::unique_ptr<ThreadPoolInterface> ChunkThreadPoolPtr;
if (NumJobs > 1)
ChunkThreadPoolPtr =
- std::make_unique<ThreadPool>(hardware_concurrency(NumJobs));
+ std::make_unique<DefaultThreadPool>(hardware_concurrency(NumJobs));
bool FoundAtLeastOneNewUninterestingChunkWithCurrentGranularity;
do {
diff --git a/llvm/unittests/ADT/LazyAtomicPointerTest.cpp b/llvm/unittests/ADT/LazyAtomicPointerTest.cpp
index efead0bdf0a31b..35582256542684 100644
--- a/llvm/unittests/ADT/LazyAtomicPointerTest.cpp
+++ b/llvm/unittests/ADT/LazyAtomicPointerTest.cpp
@@ -18,7 +18,7 @@ namespace {
TEST(LazyAtomicPointer, loadOrGenerate) {
int Value = 0;
LazyAtomicPointer<int> Ptr;
- ThreadPool Threads;
+ DefaultThreadPool Threads;
for (unsigned I = 0; I < 4; ++I)
Threads.async([&]() {
Ptr.loadOrGenerate([&]() {
@@ -38,7 +38,7 @@ TEST(LazyAtomicPointer, loadOrGenerate) {
TEST(LazyAtomicPointer, BusyState) {
int Value = 0;
LazyAtomicPointer<int> Ptr;
- ThreadPool Threads;
+ DefaultThreadPool Threads;
std::mutex BusyLock, EndLock;
std::condition_variable Busy, End;
diff --git a/llvm/unittests/Debuginfod/HTTPServerTests.cpp b/llvm/unittests/Debuginfod/HTTPServerTests.cpp
index b0af2f850576d8..cd1d5f2d9fc700 100644
--- a/llvm/unittests/Debuginfod/HTTPServerTests.cpp
+++ b/llvm/unittests/Debuginfod/HTTPServerTests.cpp
@@ -92,7 +92,7 @@ TEST_F(HTTPClientServerTest, Hello) {
Expected<unsigned> PortOrErr = Server.bind();
EXPECT_THAT_EXPECTED(PortOrErr, Succeeded());
unsigned Port = *PortOrErr;
- ThreadPool Pool(hardware_concurrency(1));
+ DefaultThreadPool Pool(hardware_concurrency(1));
Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); });
std::string Url = "http://localhost:" + utostr(Port);
HTTPRequest Request(Url);
@@ -116,7 +116,7 @@ TEST_F(HTTPClientServerTest, LambdaHandlerHello) {
Expected<unsigned> PortOrErr = Server.bind();
EXPECT_THAT_EXPECTED(PortOrErr, Succeeded());
unsigned Port = *PortOrErr;
- ThreadPool Pool(hardware_concurrency(1));
+ DefaultThreadPool Pool(hardware_concurrency(1));
Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); });
std::string Url = "http://localhost:" + utostr(Port);
HTTPRequest Request(Url);
@@ -135,7 +135,7 @@ TEST_F(HTTPClientServerTest, StreamingHello) {
Expected<unsigned> PortOrErr = Server.bind();
EXPECT_THAT_EXPECTED(PortOrErr, Succeeded());
unsigned Port = *PortOrErr;
- ThreadPool Pool(hardware_concurrency(1));
+ DefaultThreadPool Pool(hardware_concurrency(1));
Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); });
std::string Url = "http://localhost:" + utostr(Port);
HTTPRequest Request(Url);
@@ -167,7 +167,7 @@ TEST_F(HTTPClientServerTest, StreamingFileResponse) {
Expected<unsigned> PortOrErr = Server.bind();
EXPECT_THAT_EXPECTED(PortOrErr, Succeeded());
unsigned Port = *PortOrErr;
- ThreadPool Pool(hardware_concurrency(1));
+ DefaultThreadPool Pool(hardware_concurrency(1));
Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); });
std::string Url = "http://localhost:" + utostr(Port);
HTTPRequest Request(Url);
@@ -203,7 +203,7 @@ TEST_F(HTTPClientServerTest, StreamingMissingFileResponse) {
Expected<unsigned> PortOrErr = Server.bind();
EXPECT_THAT_EXPECTED(PortOrErr, Succeeded());
unsigned Port = *PortOrErr;
- ThreadPool Pool(hardware_concurrency(1));
+ DefaultThreadPool Pool(hardware_concurrency(1));
Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); });
std::string Url = "http://localhost:" + utostr(Port);
HTTPRequest Request(Url);
@@ -220,7 +220,7 @@ TEST_F(HTTPClientServerTest, ClientTimeout) {
Expected<unsigned> PortOrErr = Server.bind();
EXPECT_THAT_EXPECTED(PortOrErr, Succeeded());
unsigned Port = *PortOrErr;
- ThreadPool Pool(hardware_concurrency(1));
+ DefaultThreadPool Pool(hardware_concurrency(1));
Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); });
std::string Url = "http://localhost:" + utostr(Port);
HTTPClient Client;
@@ -257,7 +257,7 @@ TEST_F(HTTPClientServerTest, PathMatching) {
Expected<unsigned> PortOrErr = Server.bind();
EXPECT_THAT_EXPECTED(PortOrErr, Succeeded());
unsigned Port = *PortOrErr;
- ThreadPool Pool(hardware_concurrency(1));
+ DefaultThreadPool Pool(hardware_concurrency(1));
Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); });
std::string Url = "http://localhost:" + utostr(Port) + "/abc/1/2";
HTTPRequest Request(Url);
@@ -289,7 +289,7 @@ TEST_F(HTTPClientServerTest, FirstPathMatched) {
Expected<unsigned> PortOrErr = Server.bind();
EXPECT_THAT_EXPECTED(PortOrErr, Succeeded());
unsigned Port = *PortOrErr;
- ThreadPool Pool(hardware_concurrency(1));
+ DefaultThreadPool Pool(hardware_concurrency(1));
Pool.async([&]() { EXPECT_THAT_ERROR(Server.listen(), Succeeded()); });
std::string Url = "http://localhost:" + utostr(Port) + "/abc/1/2";
HTTPRequest Request(Url);
diff --git a/llvm/unittests/Support/ParallelTest.cpp b/llvm/unittests/Support/ParallelTest.cpp
index 53ef9fa25e8266..91250f01a3c117 100644
--- a/llvm/unittests/Support/ParallelTest.cpp
+++ b/llvm/unittests/Support/ParallelTest.cpp
@@ -160,7 +160,7 @@ TEST(Parallel, ParallelNestedTaskGroup) {
});
};
- ThreadPool Pool;
+ DefaultThreadPool Pool;
Pool.async(Fn);
Pool.async(Fn);
diff --git a/llvm/unittests/Support/ThreadPool.cpp b/llvm/unittests/Support/ThreadPool.cpp
index 1da8e056019d8d..d74c625d122950 100644
--- a/llvm/unittests/Support/ThreadPool.cpp
+++ b/llvm/unittests/Support/ThreadPool.cpp
@@ -140,7 +140,7 @@ TYPED_TEST(ThreadPoolTest, AsyncBarrier) {
std::atomic_int checked_in{0};
- TypeParam Pool;
+ DefaultThreadPool Pool;
for (size_t i = 0; i < 5; ++i) {
Pool.async([this, &checked_in] {
this->waitForMainThread();
@@ -160,7 +160,7 @@ TYPED_TEST(ThreadPoolTest, AsyncBarrierArgs) {
// Test that async works with a function requiring multiple parameters.
std::atomic_int checked_in{0};
- ThreadPool Pool;
+ DefaultThreadPool Pool;
for (size_t i = 0; i < 5; ++i) {
Pool.async(TestFunc, std::ref(checked_in), i);
}
@@ -170,7 +170,7 @@ TYPED_TEST(ThreadPoolTest, AsyncBarrierArgs) {
TYPED_TEST(ThreadPoolTest, Async) {
CHECK_UNSUPPORTED();
- ThreadPool Pool;
+ DefaultThreadPool Pool;
std::atomic_int i{0};
Pool.async([this, &i] {
this->waitForMainThread();
@@ -185,7 +185,7 @@ TYPED_TEST(ThreadPoolTest, Async) {
TYPED_TEST(ThreadPoolTest, GetFuture) {
CHECK_UNSUPPORTED();
- ThreadPool Pool(hardware_concurrency(2));
+ DefaultThreadPool Pool(hardware_concurrency(2));
std::atomic_int i{0};
Pool.async([this, &i] {
this->waitForMainThread();
@@ -201,7 +201,7 @@ TYPED_TEST(ThreadPoolTest, GetFuture) {
TYPED_TEST(ThreadPoolTest, GetFutureWithResult) {
CHECK_UNSUPPORTED();
- ThreadPool Pool(hardware_concurrency(2));
+ DefaultThreadPool Pool(hardware_concurrency(2));
auto F1 = Pool.async([] { return 1; });
auto F2 = Pool.async([] { return 2; });
@@ -213,7 +213,7 @@ TYPED_TEST(ThreadPoolTest, GetFutureWithResult) {
TYPED_TEST(ThreadPoolTest, GetFutureWithResultAndArgs) {
CHECK_UNSUPPORTED();
- ThreadPool Pool(hardware_concurrency(2));
+ DefaultThreadPool Pool(hardware_concurrency(2));
auto Fn = [](int x) { return x; };
auto F1 = Pool.async(Fn, 1);
auto F2 = Pool.async(Fn, 2);
@@ -229,7 +229,7 @@ TYPED_TEST(ThreadPoolTest, PoolDestruction) {
// Test that we are waiting on destruction
std::atomic_int checked_in{0};
{
- ThreadPool Pool;
+ DefaultThreadPool Pool;
for (size_t i = 0; i < 5; ++i) {
Pool.async([this, &checked_in] {
this->waitForMainThread();
@@ -250,7 +250,7 @@ TYPED_TEST(ThreadPoolTest, Groups) {
ThreadPoolStrategy S = hardware_concurrency(2);
if (S.compute_thread_count() < 2)
GTEST_SKIP();
- ThreadPool Pool(S);
+ DefaultThreadPool Pool(S);
typename TestFixture::PhaseResetHelper Helper(this);
ThreadPoolTaskGroup Group1(Pool);
ThreadPoolTaskGroup Group2(Pool);
@@ -288,7 +288,7 @@ TYPED_TEST(ThreadPoolTest, Groups) {
// Check recursive tasks.
TYPED_TEST(ThreadPoolTest, RecursiveGroups) {
CHECK_UNSUPPORTED();
- ThreadPool Pool;
+ DefaultThreadPool Pool;
ThreadPoolTaskGroup Group(Pool);
std::atomic_int checked_in1{0};
@@ -323,7 +323,7 @@ TYPED_TEST(ThreadPoolTest, RecursiveWaitDeadlock) {
ThreadPoolStrategy S = hardware_concurrency(2);
if (S.compute_thread_count() < 2)
GTEST_SKIP();
- ThreadPool Pool(S);
+ DefaultThreadPool Pool(S);
typename TestFixture::PhaseResetHelper Helper(this);
ThreadPoolTaskGroup Group(Pool);
@@ -378,7 +378,7 @@ ThreadPoolTest<ThreadPoolImpl>::RunOnAllSockets(ThreadPoolStrategy S) {
std::mutex AllThreadsLock;
unsigned Active = 0;
- ThreadPool Pool(S);
+ DefaultThreadPool Pool(S);
for (size_t I = 0; I < S.compute_thread_count(); ++I) {
Pool.async([&] {
{
diff --git a/llvm/unittests/Support/ThreadSafeAllocatorTest.cpp b/llvm/unittests/Support/ThreadSafeAllocatorTest.cpp
index d9a85b435ebdb7..b3d9430fc0f306 100644
--- a/llvm/unittests/Support/ThreadSafeAllocatorTest.cpp
+++ b/llvm/unittests/Support/ThreadSafeAllocatorTest.cpp
@@ -77,7 +77,7 @@ TEST(ThreadSafeAllocatorTest, AllocWait) {
// Get the allocation from the allocator first since this requires a lock.
Alloc.applyLocked(
[&](MockAllocator &Alloc) { C = &Alloc.getAllocCondition(); });
- ThreadPool Threads;
+ DefaultThreadPool Threads;
// First allocation of 1 byte.
Threads.async([&Alloc]() {
char *P = (char *)Alloc.Allocate(1, alignof(char));
@@ -104,7 +104,7 @@ TEST(ThreadSafeAllocatorTest, AllocWait) {
TEST(ThreadSafeAllocatorTest, AllocWithAlign) {
ThreadSafeAllocator<BumpPtrAllocator> Alloc;
- ThreadPool Threads;
+ DefaultThreadPool Threads;
for (unsigned Index = 1; Index < 100; ++Index)
Threads.async(
@@ -123,7 +123,7 @@ TEST(ThreadSafeAllocatorTest, AllocWithAlign) {
TEST(ThreadSafeAllocatorTest, SpecificBumpPtrAllocator) {
ThreadSafeAllocator<SpecificBumpPtrAllocator<int>> Alloc;
- ThreadPool Threads;
+ DefaultThreadPool Threads;
for (unsigned Index = 1; Index < 100; ++Index)
Threads.async(
diff --git a/mlir/include/mlir/IR/MLIRContext.h b/mlir/include/mlir/IR/MLIRContext.h
index 2ad35d8f78ee35..11e5329f43e681 100644
--- a/mlir/include/mlir/IR/MLIRContext.h
+++ b/mlir/include/mlir/IR/MLIRContext.h
@@ -50,7 +50,7 @@ class IRUnit;
/// To control better thread spawning, an externally owned ThreadPool can be
/// injected in the context. For example:
///
-/// llvm::ThreadPool myThreadPool;
+/// llvm::DefaultThreadPool myThreadPool;
/// while (auto *request = nextCompilationRequests()) {
/// MLIRContext ctx(registry, MLIRContext::Threading::DISABLED);
/// ctx.setThreadPool(myThreadPool);
diff --git a/mlir/lib/CAPI/IR/Support.cpp b/mlir/lib/CAPI/IR/Support.cpp
index 81c9fc77192640..3311131fc2bc83 100644
--- a/mlir/lib/CAPI/IR/Support.cpp
+++ b/mlir/lib/CAPI/IR/Support.cpp
@@ -25,7 +25,7 @@ bool mlirStringRefEqual(MlirStringRef string, MlirStringRef other) {
// LLVM ThreadPool API.
//===----------------------------------------------------------------------===//
MlirLlvmThreadPool mlirLlvmThreadPoolCreate() {
- return wrap(new llvm::ThreadPool());
+ return wrap(new llvm::DefaultThreadPool());
}
void mlirLlvmThreadPoolDestroy(MlirLlvmThreadPool threadPool) {
diff --git a/mlir/lib/ExecutionEngine/AsyncRuntime.cpp b/mlir/lib/ExecutionEngine/AsyncRuntime.cpp
index ec4a81c042c2c2..9e6f8a72169956 100644
--- a/mlir/lib/ExecutionEngine/AsyncRuntime.cpp
+++ b/mlir/lib/ExecutionEngine/AsyncRuntime.cpp
@@ -72,7 +72,7 @@ class AsyncRuntime {
}
std::atomic<int64_t> numRefCountedObjects;
- llvm::ThreadPool threadPool;
+ llvm::DefaultThreadPool threadPool;
};
// -------------------------------------------------------------------------- //
diff --git a/mlir/lib/IR/MLIRContext.cpp b/mlir/lib/IR/MLIRContext.cpp
index 92568bd311e394..e1e6d14231d9f1 100644
--- a/mlir/lib/IR/MLIRContext.cpp
+++ b/mlir/lib/IR/MLIRContext.cpp
@@ -274,7 +274,7 @@ class MLIRContextImpl {
MLIRContextImpl(bool threadingIsEnabled)
: threadingIsEnabled(threadingIsEnabled) {
if (threadingIsEnabled) {
- ownedThreadPool = std::make_unique<llvm::ThreadPool>();
+ ownedThreadPool = std::make_unique<llvm::DefaultThreadPool>();
threadPool = ownedThreadPool.get();
}
}
@@ -621,7 +621,7 @@ void MLIRContext::disableMultithreading(bool disable) {
} else if (!impl->threadPool) {
// The thread pool isn't externally provided.
assert(!impl->ownedThreadPool);
- impl->ownedThreadPool = std::make_unique<llvm::ThreadPool>();
+ impl->ownedThreadPool = std::make_unique<llvm::DefaultThreadPool>();
impl->threadPool = impl->ownedThreadPool.get();
}
}