[llvm] r367756 - Speculative Compilation
Eric Christopher via llvm-commits
llvm-commits at lists.llvm.org
Mon Aug 12 18:15:44 PDT 2019
FWIW, you had a static function defined in the .h file that isn't used in
every TU that includes it, which trips unused-function warnings. I fixed it
up thusly:
commit 4acb4ee767d0f450f6b87a68e6c9e483ee0744b6
Author: Eric Christopher <echristo at gmail.com>
Date: Tue Aug 13 00:05:01 2019 +0000
Move findBBwithCalls to the file it's used in to avoid unused function
warnings.
llvm-svn: 368636
And I'm surprised that more bots weren't broken. Do try to build with
warnings and errors on by default if you can.
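For reference, here's the pattern that trips the warning. This is a minimal
sketch with made-up names (Widget.h, widgetCount), not code from the patch:

  // Widget.h
  static int widgetCount() { return 42; } // internal linkage: one copy per TU

  // User.cpp
  #include "Widget.h"   // includes the header but never calls widgetCount()
  int useWidgets() { return 0; }
  // clang: warning: unused function 'widgetCount' [-Wunused-function]

Moving the definition into the one .cpp that actually calls it (as r368636
does for findBBwithCalls) leaves no unused copies behind.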
Thanks!
-eric
On Sat, Aug 3, 2019 at 7:41 AM Praveen Velliengiri via llvm-commits
<llvm-commits at lists.llvm.org> wrote:
>
> Author: pree-jackie
> Date: Sat Aug 3 07:42:13 2019
> New Revision: 367756
>
> URL: http://llvm.org/viewvc/llvm-project?rev=367756&view=rev
> Log:
> Speculative Compilation
>
> [ORC] Remove Speculator Variants for Different Program Representations
>
> [ORC] Block Freq Analysis
>
> Speculative Compilation with Naive Block Frequency
>
> Add Applications to OrcSpeculation
>
> ORC v2 with Block Freq Query & Example
>
> Deleted BenchMark Programs
>
> Signed-off-by: preejackie <praveenvelliengiri at gmail.com>
>
> ORCv2 comments resolved
>
> [ORCV2] NFC
>
> ORCv2 NFC
>
> [ORCv2] Speculative compilation - CFGWalkQuery
>
> ORCv2 Adapting IRSpeculationLayer to new locking scheme
>
> Added:
> llvm/trunk/examples/SpeculativeJIT/
> llvm/trunk/examples/SpeculativeJIT/CMakeLists.txt
> llvm/trunk/examples/SpeculativeJIT/SpeculativeJIT.cpp
> llvm/trunk/include/llvm/ExecutionEngine/Orc/SpeculateAnalyses.h
> llvm/trunk/include/llvm/ExecutionEngine/Orc/Speculation.h
> llvm/trunk/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp
> llvm/trunk/lib/ExecutionEngine/Orc/Speculation.cpp
> Modified:
> llvm/trunk/examples/CMakeLists.txt
> llvm/trunk/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
> llvm/trunk/include/llvm/ExecutionEngine/Orc/LazyReexports.h
> llvm/trunk/lib/ExecutionEngine/Orc/CMakeLists.txt
> llvm/trunk/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp
> llvm/trunk/lib/ExecutionEngine/Orc/LazyReexports.cpp
>
> Modified: llvm/trunk/examples/CMakeLists.txt
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/examples/CMakeLists.txt?rev=367756&r1=367755&r2=367756&view=diff
> ==============================================================================
> --- llvm/trunk/examples/CMakeLists.txt (original)
> +++ llvm/trunk/examples/CMakeLists.txt Sat Aug 3 07:42:13 2019
> @@ -5,6 +5,7 @@ add_subdirectory(HowToUseLLJIT)
> add_subdirectory(LLJITExamples)
> add_subdirectory(Kaleidoscope)
> add_subdirectory(ModuleMaker)
> +add_subdirectory(SpeculativeJIT)
>
> if(LLVM_ENABLE_EH AND (NOT WIN32) AND (NOT "${LLVM_NATIVE_ARCH}" STREQUAL "ARM"))
> add_subdirectory(ExceptionDemo)
>
> Added: llvm/trunk/examples/SpeculativeJIT/CMakeLists.txt
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/examples/SpeculativeJIT/CMakeLists.txt?rev=367756&view=auto
> ==============================================================================
> --- llvm/trunk/examples/SpeculativeJIT/CMakeLists.txt (added)
> +++ llvm/trunk/examples/SpeculativeJIT/CMakeLists.txt Sat Aug 3 07:42:13 2019
> @@ -0,0 +1,14 @@
> +set(LLVM_LINK_COMPONENTS
> + Core
> + IRReader
> + OrcJIT
> + ExecutionEngine
> + Support
> + nativecodegen
> + Analysis
> + Passes
> + )
> +
> +add_llvm_example(SpeculativeJIT
> + SpeculativeJIT.cpp
> + )
>
> Added: llvm/trunk/examples/SpeculativeJIT/SpeculativeJIT.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/examples/SpeculativeJIT/SpeculativeJIT.cpp?rev=367756&view=auto
> ==============================================================================
> --- llvm/trunk/examples/SpeculativeJIT/SpeculativeJIT.cpp (added)
> +++ llvm/trunk/examples/SpeculativeJIT/SpeculativeJIT.cpp Sat Aug 3 07:42:13 2019
> @@ -0,0 +1,197 @@
> +#include "llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h"
> +#include "llvm/ExecutionEngine/Orc/CompileUtils.h"
> +#include "llvm/ExecutionEngine/Orc/Core.h"
> +#include "llvm/ExecutionEngine/Orc/ExecutionUtils.h"
> +#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
> +#include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
> +#include "llvm/ExecutionEngine/Orc/JITTargetMachineBuilder.h"
> +#include "llvm/ExecutionEngine/Orc/RTDyldObjectLinkingLayer.h"
> +#include "llvm/ExecutionEngine/Orc/SpeculateAnalyses.h"
> +#include "llvm/ExecutionEngine/Orc/Speculation.h"
> +#include "llvm/ExecutionEngine/SectionMemoryManager.h"
> +#include "llvm/IRReader/IRReader.h"
> +#include "llvm/Support/CommandLine.h"
> +#include "llvm/Support/Debug.h"
> +#include "llvm/Support/InitLLVM.h"
> +#include "llvm/Support/SourceMgr.h"
> +#include "llvm/Support/TargetSelect.h"
> +#include "llvm/Support/ThreadPool.h"
> +
> +#include <list>
> +#include <string>
> +
> +using namespace llvm;
> +using namespace llvm::orc;
> +
> +static cl::list<std::string> InputFiles(cl::Positional, cl::OneOrMore,
> + cl::desc("input files"));
> +
> +static cl::list<std::string> InputArgv("args", cl::Positional,
> + cl::desc("<program arguments>..."),
> + cl::ZeroOrMore, cl::PositionalEatsArgs);
> +
> +static cl::opt<unsigned> NumThreads("num-threads", cl::Optional,
> + cl::desc("Number of compile threads"),
> + cl::init(4));
> +
> +ExitOnError ExitOnErr;
> +
> +// Add Layers
> +class SpeculativeJIT {
> +public:
> + static Expected<std::unique_ptr<SpeculativeJIT>> Create() {
> + auto JTMB = orc::JITTargetMachineBuilder::detectHost();
> + if (!JTMB)
> + return JTMB.takeError();
> +
> + auto DL = JTMB->getDefaultDataLayoutForTarget();
> + if (!DL)
> + return DL.takeError();
> +
> + auto ES = llvm::make_unique<ExecutionSession>();
> +
> + auto LCTMgr = createLocalLazyCallThroughManager(
> + JTMB->getTargetTriple(), *ES,
> + pointerToJITTargetAddress(explodeOnLazyCompileFailure));
> + if (!LCTMgr)
> + return LCTMgr.takeError();
> +
> + auto ISMBuilder =
> + createLocalIndirectStubsManagerBuilder(JTMB->getTargetTriple());
> + if (!ISMBuilder)
> + return make_error<StringError>("No indirect stubs manager for target",
> + inconvertibleErrorCode());
> +
> + auto ProcessSymbolsSearchGenerator =
> + DynamicLibrarySearchGenerator::GetForCurrentProcess(
> + DL->getGlobalPrefix());
> + if (!ProcessSymbolsSearchGenerator)
> + return ProcessSymbolsSearchGenerator.takeError();
> +
> + std::unique_ptr<SpeculativeJIT> SJ(new SpeculativeJIT(
> + std::move(ES), std::move(*DL), std::move(*JTMB), std::move(*LCTMgr),
> + std::move(ISMBuilder), std::move(*ProcessSymbolsSearchGenerator)));
> + return std::move(SJ);
> + }
> +
> + ExecutionSession &getES() { return *ES; }
> +
> + Error addModule(JITDylib &JD, ThreadSafeModule TSM) {
> + return CODLayer.add(JD, std::move(TSM));
> + }
> +
> + Expected<JITEvaluatedSymbol> lookup(StringRef UnmangledName) {
> + return ES->lookup({&ES->getMainJITDylib()}, Mangle(UnmangledName));
> + }
> +
> + ~SpeculativeJIT() { CompileThreads.wait(); }
> +
> +private:
> + using IndirectStubsManagerBuilderFunction =
> + std::function<std::unique_ptr<IndirectStubsManager>()>;
> +
> + static void explodeOnLazyCompileFailure() {
> + errs() << "Lazy compilation failed, Symbol Implementation not found!\n";
> + exit(1);
> + }
> +
> + SpeculativeJIT(std::unique_ptr<ExecutionSession> ES, DataLayout DL,
> + orc::JITTargetMachineBuilder JTMB,
> + std::unique_ptr<LazyCallThroughManager> LCTMgr,
> + IndirectStubsManagerBuilderFunction ISMBuilder,
> + DynamicLibrarySearchGenerator ProcessSymbolsGenerator)
> + : ES(std::move(ES)), DL(std::move(DL)), LCTMgr(std::move(LCTMgr)),
> + CompileLayer(*this->ES, ObjLayer,
> + ConcurrentIRCompiler(std::move(JTMB))),
> + S(Imps, *this->ES),
> + SpeculateLayer(*this->ES, CompileLayer, S, BlockFreqQuery()),
> + CODLayer(*this->ES, SpeculateLayer, *this->LCTMgr,
> + std::move(ISMBuilder)) {
> + this->ES->getMainJITDylib().setGenerator(
> + std::move(ProcessSymbolsGenerator));
> + this->CODLayer.setImplMap(&Imps);
> + this->ES->setDispatchMaterialization(
> +
> + [this](JITDylib &JD, std::unique_ptr<MaterializationUnit> MU) {
> + // FIXME: Switch to move capture once we have C++14.
> + auto SharedMU = std::shared_ptr<MaterializationUnit>(std::move(MU));
> + auto Work = [SharedMU, &JD]() { SharedMU->doMaterialize(JD); };
> + CompileThreads.async(std::move(Work));
> + });
> + JITEvaluatedSymbol SpeculatorSymbol(JITTargetAddress(&S),
> + JITSymbolFlags::Exported);
> + ExitOnErr(this->ES->getMainJITDylib().define(
> + absoluteSymbols({{Mangle("__orc_speculator"), SpeculatorSymbol}})));
> + LocalCXXRuntimeOverrides CXXRuntimeoverrides;
> + ExitOnErr(CXXRuntimeoverrides.enable(this->ES->getMainJITDylib(), Mangle));
> + }
> +
> + static std::unique_ptr<SectionMemoryManager> createMemMgr() {
> + return llvm::make_unique<SectionMemoryManager>();
> + }
> +
> + std::unique_ptr<ExecutionSession> ES;
> + DataLayout DL;
> + MangleAndInterner Mangle{*ES, DL};
> + ThreadPool CompileThreads{NumThreads};
> +
> + Triple TT;
> + std::unique_ptr<LazyCallThroughManager> LCTMgr;
> + IRCompileLayer CompileLayer;
> + ImplSymbolMap Imps;
> + Speculator S;
> + RTDyldObjectLinkingLayer ObjLayer{*ES, createMemMgr};
> + IRSpeculationLayer SpeculateLayer;
> + CompileOnDemandLayer CODLayer;
> +};
> +
> +int main(int argc, char *argv[]) {
> + // Initialize LLVM.
> + InitLLVM X(argc, argv);
> +
> + InitializeNativeTarget();
> + InitializeNativeTargetAsmPrinter();
> +
> + cl::ParseCommandLineOptions(argc, argv, "SpeculativeJIT");
> + ExitOnErr.setBanner(std::string(argv[0]) + ": ");
> +
> + if (NumThreads < 1) {
> + errs() << "Speculative compilation requires one or more dedicated compile "
> + "threads\n";
> + return 1;
> + }
> +
> + // Create a JIT instance.
> + auto SJ = ExitOnErr(SpeculativeJIT::Create());
> +
> + // Load the IR inputs.
> + for (const auto &InputFile : InputFiles) {
> + SMDiagnostic Err;
> + auto Ctx = llvm::make_unique<LLVMContext>();
> + auto M = parseIRFile(InputFile, Err, *Ctx);
> + if (!M) {
> + Err.print(argv[0], errs());
> + return 1;
> + }
> +
> + ExitOnErr(SJ->addModule(SJ->getES().getMainJITDylib(),
> + ThreadSafeModule(std::move(M), std::move(Ctx))));
> + }
> +
> + // Build an argv array for the JIT'd main.
> + std::vector<const char *> ArgV;
> + ArgV.push_back(argv[0]);
> + for (const auto &InputArg : InputArgv)
> + ArgV.push_back(InputArg.data());
> + ArgV.push_back(nullptr);
> +
> + // Look up the JIT'd main, cast it to a function pointer, then call it.
> +
> + auto MainSym = ExitOnErr(SJ->lookup("main"));
> + int (*Main)(int, const char *[]) =
> + (int (*)(int, const char *[]))MainSym.getAddress();
> +
> + Main(ArgV.size() - 1, ArgV.data());
> +
> + return 0;
> +}
>
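In case anyone wants to kick the tires on the example: it takes IR/bitcode
files on the command line, loads each into its own context, and then calls
the JIT'd main. A trivial input to drive it might look like the following
(hypothetical test file hot.cpp, not part of the patch; compile it to IR
with clang -S -emit-llvm and pass the resulting .ll to SpeculativeJIT):

  // hot.cpp - hypothetical input for the SpeculativeJIT example.
  #include <cstdio>

  static int work(int N) {        // direct callee of main, a speculation candidate
    int Sum = 0;
    for (int I = 0; I < N; ++I)
      Sum += I;
    return Sum;
  }

  int main() {
    std::printf("%d\n", work(1000));
    return 0;
  }

With -num-threads left at its default of 4, the speculator can start
compiling work() on the compile-thread pool as soon as the JIT'd main begins
executing, so the body may already be ready by the time the lazy call is
taken.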
> Modified: llvm/trunk/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h?rev=367756&r1=367755&r2=367756&view=diff
> ==============================================================================
> --- llvm/trunk/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h (original)
> +++ llvm/trunk/include/llvm/ExecutionEngine/Orc/CompileOnDemandLayer.h Sat Aug 3 07:42:13 2019
> @@ -26,6 +26,7 @@
> #include "llvm/ExecutionEngine/Orc/LazyReexports.h"
> #include "llvm/ExecutionEngine/Orc/Legacy.h"
> #include "llvm/ExecutionEngine/Orc/OrcError.h"
> +#include "llvm/ExecutionEngine/Orc/Speculation.h"
> #include "llvm/ExecutionEngine/RuntimeDyld.h"
> #include "llvm/IR/Attributes.h"
> #include "llvm/IR/Constant.h"
> @@ -91,6 +92,8 @@ public:
> /// Sets the partition function.
> void setPartitionFunction(PartitionFunction Partition);
>
> + /// Sets the ImplSymbolMap
> + void setImplMap(ImplSymbolMap *Imp);
> /// Emits the given module. This should not be called by clients: it will be
> /// called by the JIT when a definition added via the add method is requested.
> void emit(MaterializationResponsibility R, ThreadSafeModule TSM) override;
> @@ -128,6 +131,7 @@ private:
> PerDylibResourcesMap DylibResources;
> PartitionFunction Partition = compileRequested;
> SymbolLinkagePromoter PromoteSymbols;
> + ImplSymbolMap *AliaseeImpls = nullptr;
> };
>
> /// Compile-on-demand layer.
>
> Modified: llvm/trunk/include/llvm/ExecutionEngine/Orc/LazyReexports.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/ExecutionEngine/Orc/LazyReexports.h?rev=367756&r1=367755&r2=367756&view=diff
> ==============================================================================
> --- llvm/trunk/include/llvm/ExecutionEngine/Orc/LazyReexports.h (original)
> +++ llvm/trunk/include/llvm/ExecutionEngine/Orc/LazyReexports.h Sat Aug 3 07:42:13 2019
> @@ -18,6 +18,7 @@
>
> #include "llvm/ExecutionEngine/Orc/Core.h"
> #include "llvm/ExecutionEngine/Orc/IndirectionUtils.h"
> +#include "llvm/ExecutionEngine/Orc/Speculation.h"
>
> namespace llvm {
>
> @@ -159,7 +160,7 @@ public:
> IndirectStubsManager &ISManager,
> JITDylib &SourceJD,
> SymbolAliasMap CallableAliases,
> - VModuleKey K);
> + ImplSymbolMap *SrcJDLoc, VModuleKey K);
>
> StringRef getName() const override;
>
> @@ -174,6 +175,7 @@ private:
> SymbolAliasMap CallableAliases;
> std::shared_ptr<LazyCallThroughManager::NotifyResolvedFunction>
> NotifyResolved;
> + ImplSymbolMap *AliaseeTable;
> };
>
> /// Define lazy-reexports based on the given SymbolAliasMap. Each lazy re-export
> @@ -182,9 +184,10 @@ private:
> inline std::unique_ptr<LazyReexportsMaterializationUnit>
> lazyReexports(LazyCallThroughManager &LCTManager,
> IndirectStubsManager &ISManager, JITDylib &SourceJD,
> - SymbolAliasMap CallableAliases, VModuleKey K = VModuleKey()) {
> + SymbolAliasMap CallableAliases, ImplSymbolMap *SrcJDLoc = nullptr,
> + VModuleKey K = VModuleKey()) {
> return llvm::make_unique<LazyReexportsMaterializationUnit>(
> - LCTManager, ISManager, SourceJD, std::move(CallableAliases),
> + LCTManager, ISManager, SourceJD, std::move(CallableAliases), SrcJDLoc,
> std::move(K));
> }
>
>
> Added: llvm/trunk/include/llvm/ExecutionEngine/Orc/SpeculateAnalyses.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/ExecutionEngine/Orc/SpeculateAnalyses.h?rev=367756&view=auto
> ==============================================================================
> --- llvm/trunk/include/llvm/ExecutionEngine/Orc/SpeculateAnalyses.h (added)
> +++ llvm/trunk/include/llvm/ExecutionEngine/Orc/SpeculateAnalyses.h Sat Aug 3 07:42:13 2019
> @@ -0,0 +1,72 @@
> +//===-- SpeculateAnalyses.h --*- C++ -*-===//
> +//
> +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
> +// See https://llvm.org/LICENSE.txt for license information.
> +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
> +//
> +//===----------------------------------------------------------------------===//
> +// \file
> +/// Contains the analyses and result interpretation used to select functions
> +/// that are likely to be called soon, so they can be speculatively compiled
> +/// before they are actually called. [Experimental]
> +//===----------------------------------------------------------------------===//
> +
> +#ifndef LLVM_EXECUTIONENGINE_ORC_SPECULATEANALYSES_H
> +#define LLVM_EXECUTIONENGINE_ORC_SPECULATEANALYSES_H
> +
> +#include "llvm/ExecutionEngine/Orc/Core.h"
> +#include "llvm/ExecutionEngine/Orc/Speculation.h"
> +
> +#include <vector>
> +
> +namespace {
> +using namespace llvm;
> +std::vector<const BasicBlock *> findBBwithCalls(const Function &F,
> + bool IndirectCall = false) {
> + std::vector<const BasicBlock *> BBs;
> +
> + auto findCallInst = [&IndirectCall](const Instruction &I) {
> + if (auto Call = dyn_cast<CallBase>(&I)) {
> + if (Call->isIndirectCall())
> + return IndirectCall;
> + else
> + return true;
> + } else
> + return false;
> + };
> + for (auto &BB : F)
> + if (findCallInst(*BB.getTerminator()) ||
> + llvm::any_of(BB.instructionsWithoutDebug(), findCallInst))
> + BBs.emplace_back(&BB);
> +
> + return BBs;
> +}
> +} // namespace
> +
> +namespace llvm {
> +
> +namespace orc {
> +
> +// Direct calls in high frequency basic blocks are extracted.
> +class BlockFreqQuery {
> +private:
> + void findCalles(const BasicBlock *, DenseSet<StringRef> &);
> + size_t numBBToGet(size_t);
> +
> +public:
> + using ResultTy = Optional<DenseMap<StringRef, DenseSet<StringRef>>>;
> +
> + // Find the functions likely to execute next, based on IR block frequency.
> + ResultTy operator()(Function &F, FunctionAnalysisManager &FAM);
> +};
> +
> +// Walk the CFG by exploiting BranchProbabilityInfo
> +class CFGWalkQuery {
> +public:
> + using ResultTy = Optional<DenseMap<StringRef, DenseSet<StringRef>>>;
> + ResultTy operator()(Function &F, FunctionAnalysisManager &FAM);
> +};
> +
> +} // namespace orc
> +} // namespace llvm
> +
> +#endif // LLVM_EXECUTIONENGINE_ORC_SPECULATEANALYSES_H
>
> Added: llvm/trunk/include/llvm/ExecutionEngine/Orc/Speculation.h
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/include/llvm/ExecutionEngine/Orc/Speculation.h?rev=367756&view=auto
> ==============================================================================
> --- llvm/trunk/include/llvm/ExecutionEngine/Orc/Speculation.h (added)
> +++ llvm/trunk/include/llvm/ExecutionEngine/Orc/Speculation.h Sat Aug 3 07:42:13 2019
> @@ -0,0 +1,208 @@
> +//===-- Speculation.h - Speculative Compilation --*- C++ -*-===//
> +//
> +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
> +// See https://llvm.org/LICENSE.txt for license information.
> +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
> +//
> +//===----------------------------------------------------------------------===//
> +//
> +// Contains the definition to support speculative compilation when laziness is
> +// enabled.
> +//===----------------------------------------------------------------------===//
> +
> +#ifndef LLVM_EXECUTIONENGINE_ORC_SPECULATION_H
> +#define LLVM_EXECUTIONENGINE_ORC_SPECULATION_H
> +
> +#include "llvm/ADT/ArrayRef.h"
> +#include "llvm/ADT/DenseMap.h"
> +#include "llvm/ADT/Optional.h"
> +#include "llvm/ExecutionEngine/Orc/Core.h"
> +#include "llvm/ExecutionEngine/Orc/IRCompileLayer.h"
> +#include "llvm/IR/PassManager.h"
> +#include "llvm/Passes/PassBuilder.h"
> +
> +#include <mutex>
> +#include <type_traits>
> +#include <utility>
> +#include <vector>
> +
> +namespace llvm {
> +namespace orc {
> +
> +class Speculator;
> +
> +// Track the Impls (JITDylib, Symbols) of Symbols while lazy call-through
> +// trampolines are created. Operations are guarded by locks to ensure that the
> +// map stays in a consistent state across reads and writes.
> +
> +class ImplSymbolMap {
> + friend class Speculator;
> +
> +public:
> + using AliaseeDetails = std::pair<SymbolStringPtr, JITDylib *>;
> + using Alias = SymbolStringPtr;
> + using ImapTy = DenseMap<Alias, AliaseeDetails>;
> + void trackImpls(SymbolAliasMap ImplMaps, JITDylib *SrcJD);
> +
> +private:
> + // FIXME: find the right way to distinguish pre-compiled symbols, and update
> + // the call site
> + Optional<AliaseeDetails> getImplFor(const SymbolStringPtr &StubSymbol) {
> + std::lock_guard<std::mutex> Lockit(ConcurrentAccess);
> + auto Position = Maps.find(StubSymbol);
> + if (Position != Maps.end())
> + return Position->getSecond();
> + else
> + return None;
> + }
> +
> + std::mutex ConcurrentAccess;
> + ImapTy Maps;
> +};
> +
> +// Defines the Speculator concept.
> +class Speculator {
> +public:
> + using TargetFAddr = JITTargetAddress;
> + using FunctionCandidatesMap = DenseMap<SymbolStringPtr, SymbolNameSet>;
> + using StubAddrLikelies = DenseMap<TargetFAddr, SymbolNameSet>;
> +
> +private:
> + void registerSymbolsWithAddr(TargetFAddr ImplAddr,
> + SymbolNameSet likelySymbols) {
> + std::lock_guard<std::mutex> Lockit(ConcurrentAccess);
> + GlobalSpecMap.insert({ImplAddr, std::move(likelySymbols)});
> + }
> +
> + void launchCompile(JITTargetAddress FAddr) {
> + SymbolNameSet CandidateSet;
> + // Copying CandidateSet is necessary to avoid unsynchronized access to
> + // the data structure.
> + {
> + std::lock_guard<std::mutex> Lockit(ConcurrentAccess);
> + auto It = GlobalSpecMap.find(FAddr);
> + // Kill this when jump-on-first-call instrumentation is in place.
> + auto Iv = AlreadyExecuted.insert(FAddr);
> + if (It == GlobalSpecMap.end() || Iv.second == false)
> + return;
> + else
> + CandidateSet = It->getSecond();
> + }
> +
> + // Try to distinguish pre-compiled symbols!
> + for (auto &Callee : CandidateSet) {
> + auto ImplSymbol = AliaseeImplTable.getImplFor(Callee);
> + if (!ImplSymbol.hasValue())
> + continue;
> + const auto &ImplSymbolName = ImplSymbol.getPointer()->first;
> + auto *ImplJD = ImplSymbol.getPointer()->second;
> + ES.lookup(JITDylibSearchList({{ImplJD, true}}),
> + SymbolNameSet({ImplSymbolName}), SymbolState::Ready,
> + [this](Expected<SymbolMap> Result) {
> + if (auto Err = Result.takeError())
> + ES.reportError(std::move(Err));
> + },
> + NoDependenciesToRegister);
> + }
> + }
> +
> +public:
> + Speculator(ImplSymbolMap &Impl, ExecutionSession &ref)
> + : AliaseeImplTable(Impl), ES(ref), GlobalSpecMap(0) {}
> + Speculator(const Speculator &) = delete;
> + Speculator(Speculator &&) = delete;
> + Speculator &operator=(const Speculator &) = delete;
> + Speculator &operator=(Speculator &&) = delete;
> + ~Speculator() {}
> +
> + // Speculatively compile likely functions for the given stub address;
> + // this is the destination of the __orc_speculate_for call.
> + void speculateFor(TargetFAddr StubAddr) { launchCompile(StubAddr); }
> +
> + // FIXME : Register with Stub Address, after JITLink Fix.
> + void registerSymbols(FunctionCandidatesMap Candidates, JITDylib *JD) {
> + for (auto &SymPair : Candidates) {
> + auto Target = SymPair.first;
> + auto Likely = SymPair.second;
> +
> + auto OnReadyFixUp = [Likely, Target,
> + this](Expected<SymbolMap> ReadySymbol) {
> + if (ReadySymbol) {
> + auto RAddr = (*ReadySymbol)[Target].getAddress();
> + registerSymbolsWithAddr(RAddr, std::move(Likely));
> + } else
> + this->getES().reportError(ReadySymbol.takeError());
> + };
> + // Include non-exported symbols also.
> + ES.lookup(JITDylibSearchList({{JD, true}}), SymbolNameSet({Target}),
> + SymbolState::Ready, OnReadyFixUp, NoDependenciesToRegister);
> + }
> + }
> +
> + ExecutionSession &getES() { return ES; }
> +
> +private:
> + std::mutex ConcurrentAccess;
> + ImplSymbolMap &AliaseeImplTable;
> + ExecutionSession &ES;
> + DenseSet<TargetFAddr> AlreadyExecuted;
> + StubAddrLikelies GlobalSpecMap;
> +};
> +// TODO: replace DenseMap with Pair
> +class IRSpeculationLayer : public IRLayer {
> +public:
> + using IRlikiesStrRef = Optional<DenseMap<StringRef, DenseSet<StringRef>>>;
> + using ResultEval =
> + std::function<IRlikiesStrRef(Function &, FunctionAnalysisManager &)>;
> + using TargetAndLikelies = DenseMap<SymbolStringPtr, SymbolNameSet>;
> +
> + IRSpeculationLayer(ExecutionSession &ES, IRCompileLayer &BaseLayer,
> + Speculator &Spec, ResultEval Interpreter)
> + : IRLayer(ES), NextLayer(BaseLayer), S(Spec), QueryAnalysis(Interpreter) {
> + PB.registerFunctionAnalyses(FAM);
> + }
> +
> + template <
> + typename AnalysisTy,
> + typename std::enable_if<
> + std::is_base_of<AnalysisInfoMixin<AnalysisTy>, AnalysisTy>::value,
> + bool>::type = true>
> + void registerAnalysis() {
> + FAM.registerPass([]() { return AnalysisTy(); });
> + }
> +
> + void emit(MaterializationResponsibility R, ThreadSafeModule TSM);
> +
> +private:
> + TargetAndLikelies
> + internToJITSymbols(DenseMap<StringRef, DenseSet<StringRef>> IRNames) {
> + assert(!IRNames.empty() && "No IRNames received to Intern?");
> + TargetAndLikelies InternedNames;
> + DenseSet<SymbolStringPtr> TargetJITNames;
> + ExecutionSession &Es = getExecutionSession();
> + for (auto &NamePair : IRNames) {
> + for (auto &TargetNames : NamePair.second)
> + TargetJITNames.insert(Es.intern(TargetNames));
> +
> + InternedNames.insert(
> + {Es.intern(NamePair.first), std::move(TargetJITNames)});
> + }
> + return InternedNames;
> + }
> +
> + IRCompileLayer &NextLayer;
> + Speculator &S;
> + PassBuilder PB;
> + FunctionAnalysisManager FAM;
> + ResultEval QueryAnalysis;
> +};
> +
> +// Runtime Function Interface
> +extern "C" {
> +void __orc_speculate_for(Speculator *, uint64_t stub_id);
> +}
> +
> +} // namespace orc
> +} // namespace llvm
> +
> +#endif // LLVM_EXECUTIONENGINE_ORC_SPECULATION_H
>
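A note on the query interface above: IRSpeculationLayer accepts any callable
matching ResultEval, so clients can plug in their own heuristic in place of
BlockFreqQuery. As a rough sketch (illustrative only, not part of the patch;
AllCalleesQuery and its header name are hypothetical), a query that simply
collects every direct callee of a function could look like:

  // AllCalleesQuery.h (hypothetical) - gathers all direct callees of F.
  #include "llvm/ADT/DenseSet.h"
  #include "llvm/ExecutionEngine/Orc/Speculation.h"
  #include "llvm/IR/Instructions.h"

  namespace llvm {
  namespace orc {

  class AllCalleesQuery {
  public:
    using ResultTy = Optional<DenseMap<StringRef, DenseSet<StringRef>>>;

    // The FunctionAnalysisManager is unused by this naive query.
    ResultTy operator()(Function &F, FunctionAnalysisManager &) {
      DenseSet<StringRef> Callees;
      for (auto &BB : F)
        for (auto &I : BB)
          if (auto *CB = dyn_cast<CallBase>(&I))
            if (auto *Callee = dyn_cast<Function>(
                    CB->getCalledOperand()->stripPointerCasts()))
              Callees.insert(Callee->getName());
      if (Callees.empty())
        return None;
      DenseMap<StringRef, DenseSet<StringRef>> Result;
      Result.insert({F.getName(), std::move(Callees)});
      return Result;
    }
  };

  } // namespace orc
  } // namespace llvm

It would then be passed where the example passes BlockFreqQuery(), e.g.
IRSpeculationLayer(ES, CompileLayer, S, AllCalleesQuery()).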
> Modified: llvm/trunk/lib/ExecutionEngine/Orc/CMakeLists.txt
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/ExecutionEngine/Orc/CMakeLists.txt?rev=367756&r1=367755&r2=367756&view=diff
> ==============================================================================
> --- llvm/trunk/lib/ExecutionEngine/Orc/CMakeLists.txt (original)
> +++ llvm/trunk/lib/ExecutionEngine/Orc/CMakeLists.txt Sat Aug 3 07:42:13 2019
> @@ -21,7 +21,8 @@ add_llvm_library(LLVMOrcJIT
> RPCUtils.cpp
> RTDyldObjectLinkingLayer.cpp
> ThreadSafeModule.cpp
> -
> + Speculation.cpp
> + SpeculateAnalyses.cpp
> ADDITIONAL_HEADER_DIRS
> ${LLVM_MAIN_INCLUDE_DIR}/llvm/ExecutionEngine/Orc
>
> @@ -31,6 +32,7 @@ add_llvm_library(LLVMOrcJIT
>
> target_link_libraries(LLVMOrcJIT
> PRIVATE
> + LLVMAnalysis
> LLVMBitReader
> LLVMBitWriter
> )
>
> Modified: llvm/trunk/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp?rev=367756&r1=367755&r2=367756&view=diff
> ==============================================================================
> --- llvm/trunk/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp (original)
> +++ llvm/trunk/lib/ExecutionEngine/Orc/CompileOnDemandLayer.cpp Sat Aug 3 07:42:13 2019
> @@ -118,6 +118,9 @@ void CompileOnDemandLayer::setPartitionF
> this->Partition = std::move(Partition);
> }
>
> +void CompileOnDemandLayer::setImplMap(ImplSymbolMap *Imp) {
> + this->AliaseeImpls = Imp;
> +}
> void CompileOnDemandLayer::emit(MaterializationResponsibility R,
> ThreadSafeModule TSM) {
> assert(TSM && "Null module");
> @@ -161,7 +164,7 @@ void CompileOnDemandLayer::emit(Material
>
> R.replace(reexports(PDR.getImplDylib(), std::move(NonCallables), true));
> R.replace(lazyReexports(LCTMgr, PDR.getISManager(), PDR.getImplDylib(),
> - std::move(Callables)));
> + std::move(Callables), AliaseeImpls));
> }
>
> CompileOnDemandLayer::PerDylibResources &
>
> Modified: llvm/trunk/lib/ExecutionEngine/Orc/LazyReexports.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/ExecutionEngine/Orc/LazyReexports.cpp?rev=367756&r1=367755&r2=367756&view=diff
> ==============================================================================
> --- llvm/trunk/lib/ExecutionEngine/Orc/LazyReexports.cpp (original)
> +++ llvm/trunk/lib/ExecutionEngine/Orc/LazyReexports.cpp Sat Aug 3 07:42:13 2019
> @@ -50,7 +50,6 @@ LazyCallThroughManager::callThroughToSym
> SourceJD = I->second.first;
> SymbolName = I->second.second;
> }
> -
> auto LookupResult =
> ES.lookup(JITDylibSearchList({{SourceJD, true}}), SymbolName);
>
> @@ -121,7 +120,8 @@ createLocalLazyCallThroughManager(const
>
> LazyReexportsMaterializationUnit::LazyReexportsMaterializationUnit(
> LazyCallThroughManager &LCTManager, IndirectStubsManager &ISManager,
> - JITDylib &SourceJD, SymbolAliasMap CallableAliases, VModuleKey K)
> + JITDylib &SourceJD, SymbolAliasMap CallableAliases, ImplSymbolMap *SrcJDLoc,
> + VModuleKey K)
> : MaterializationUnit(extractFlags(CallableAliases), std::move(K)),
> LCTManager(LCTManager), ISManager(ISManager), SourceJD(SourceJD),
> CallableAliases(std::move(CallableAliases)),
> @@ -129,7 +129,8 @@ LazyReexportsMaterializationUnit::LazyRe
> [&ISManager](JITDylib &JD, const SymbolStringPtr &SymbolName,
> JITTargetAddress ResolvedAddr) {
> return ISManager.updatePointer(*SymbolName, ResolvedAddr);
> - })) {}
> + })),
> + AliaseeTable(SrcJDLoc) {}
>
> StringRef LazyReexportsMaterializationUnit::getName() const {
> return "<Lazy Reexports>";
> @@ -149,7 +150,7 @@ void LazyReexportsMaterializationUnit::m
>
> if (!CallableAliases.empty())
> R.replace(lazyReexports(LCTManager, ISManager, SourceJD,
> - std::move(CallableAliases)));
> + std::move(CallableAliases), AliaseeTable));
>
> IndirectStubsManager::StubInitsMap StubInits;
> for (auto &Alias : RequestedAliases) {
> @@ -168,6 +169,9 @@ void LazyReexportsMaterializationUnit::m
> std::make_pair(*CallThroughTrampoline, Alias.second.AliasFlags);
> }
>
> + if (AliaseeTable != nullptr && !RequestedAliases.empty())
> + AliaseeTable->trackImpls(RequestedAliases, &SourceJD);
> +
> if (auto Err = ISManager.createStubs(StubInits)) {
> SourceJD.getExecutionSession().reportError(std::move(Err));
> R.failMaterialization();
>
> Added: llvm/trunk/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp?rev=367756&view=auto
> ==============================================================================
> --- llvm/trunk/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp (added)
> +++ llvm/trunk/lib/ExecutionEngine/Orc/SpeculateAnalyses.cpp Sat Aug 3 07:42:13 2019
> @@ -0,0 +1,87 @@
> +//===-- SpeculateAnalyses.cpp --*- C++ -*-===//
> +//
> +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
> +// See https://llvm.org/LICENSE.txt for license information.
> +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
> +//
> +//===----------------------------------------------------------------------===//
> +
> +#include "llvm/ExecutionEngine/Orc/SpeculateAnalyses.h"
> +#include "llvm/ADT/DenseMap.h"
> +#include "llvm/ADT/STLExtras.h"
> +#include "llvm/ADT/SmallVector.h"
> +#include "llvm/Analysis/BlockFrequencyInfo.h"
> +
> +// Implementations of Queries shouldn't need to lock resources such as the
> +// LLVMContext: each argument (function) has a non-shared LLVMContext.
> +namespace llvm {
> +namespace orc {
> +
> +// Collect direct calls only
> +void BlockFreqQuery::findCalles(const BasicBlock *BB,
> + DenseSet<StringRef> &CallesNames) {
> + assert(BB != nullptr && "Traversing Null BB to find calls?");
> +
> + auto getCalledFunction = [&CallesNames](const CallBase *Call) {
> + auto CalledValue = Call->getCalledOperand()->stripPointerCasts();
> + if (auto DirectCall = dyn_cast<Function>(CalledValue))
> + CallesNames.insert(DirectCall->getName());
> + };
> + for (auto &I : BB->instructionsWithoutDebug())
> + if (auto CI = dyn_cast<CallInst>(&I))
> + getCalledFunction(CI);
> +
> + if (auto II = dyn_cast<InvokeInst>(BB->getTerminator()))
> + getCalledFunction(II);
> +}
> +
> +// Rough heuristic for how many of the hottest blocks to inspect.
> +size_t BlockFreqQuery::numBBToGet(size_t numBB) {
> + // small CFG
> + if (numBB < 4)
> + return numBB;
> + // mid-size CFG
> + else if (numBB < 20)
> + return (numBB / 2);
> + else
> + return (numBB / 2) + (numBB / 4);
> +}
> +
> +BlockFreqQuery::ResultTy BlockFreqQuery::
> +operator()(Function &F, FunctionAnalysisManager &FAM) {
> + DenseMap<StringRef, DenseSet<StringRef>> CallerAndCalles;
> + DenseSet<StringRef> Calles;
> + SmallVector<std::pair<const BasicBlock *, uint64_t>, 8> BBFreqs;
> +
> + auto IBBs = findBBwithCalls(F);
> +
> + if (IBBs.empty())
> + return None;
> +
> + auto &BFI = FAM.getResult<BlockFrequencyAnalysis>(F);
> +
> + for (const auto I : IBBs)
> + BBFreqs.push_back({I, BFI.getBlockFreq(I).getFrequency()});
> +
> + assert(IBBs.size() == BBFreqs.size() && "BB Count Mismatch");
> +
> + llvm::sort(BBFreqs.begin(), BBFreqs.end(),
> + [](decltype(BBFreqs)::const_reference BBF,
> + decltype(BBFreqs)::const_reference BBS) {
> + return BBF.second > BBS.second;
> + });
> +
> + // ignoring number of direct calls in a BB
> + auto Topk = numBBToGet(BBFreqs.size());
> +
> + for (size_t i = 0; i < Topk; i++)
> + findCalles(BBFreqs[i].first, Calles);
> +
> + assert(!Calles.empty() && "Running Analysis on Function with no calls?");
> +
> + CallerAndCalles.insert({F.getName(), std::move(Calles)});
> +
> + return CallerAndCalles;
> +}
> +} // namespace orc
> +} // namespace llvm
>
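For what it's worth, the query can also be exercised outside the JIT, which
is handy for unit-testing the heuristic. A minimal sketch (assuming an
already-parsed Module M; dumpSpeculationCandidates is a hypothetical helper,
not part of the patch):

  #include "llvm/ExecutionEngine/Orc/SpeculateAnalyses.h"
  #include "llvm/IR/Module.h"
  #include "llvm/Passes/PassBuilder.h"
  #include "llvm/Support/raw_ostream.h"

  using namespace llvm;
  using namespace llvm::orc;

  // Runs BlockFreqQuery over every defined function in M and prints the
  // callees it would speculate on.
  static void dumpSpeculationCandidates(Module &M) {
    FunctionAnalysisManager FAM;
    PassBuilder PB;
    PB.registerFunctionAnalyses(FAM); // BlockFrequencyAnalysis and friends

    BlockFreqQuery Query;
    for (Function &F : M) {
      if (F.isDeclaration())
        continue;
      if (auto Result = Query(F, FAM))
        for (auto &KV : *Result)
          for (StringRef Callee : KV.second)
            errs() << KV.first << " -> " << Callee << "\n";
    }
  }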
> Added: llvm/trunk/lib/ExecutionEngine/Orc/Speculation.cpp
> URL: http://llvm.org/viewvc/llvm-project/llvm/trunk/lib/ExecutionEngine/Orc/Speculation.cpp?rev=367756&view=auto
> ==============================================================================
> --- llvm/trunk/lib/ExecutionEngine/Orc/Speculation.cpp (added)
> +++ llvm/trunk/lib/ExecutionEngine/Orc/Speculation.cpp Sat Aug 3 07:42:13 2019
> @@ -0,0 +1,97 @@
> +//===---------- Speculation.cpp - Utilities for Speculation ----------===//
> +//
> +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
> +// See https://llvm.org/LICENSE.txt for license information.
> +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
> +//
> +//===----------------------------------------------------------------------===//
> +
> +#include "llvm/ExecutionEngine/Orc/Speculation.h"
> +
> +#include "llvm/IR/BasicBlock.h"
> +#include "llvm/IR/Function.h"
> +#include "llvm/IR/IRBuilder.h"
> +#include "llvm/IR/Instruction.h"
> +#include "llvm/IR/Instructions.h"
> +#include "llvm/IR/LLVMContext.h"
> +#include "llvm/IR/Module.h"
> +#include "llvm/IR/Type.h"
> +#include "llvm/IR/Verifier.h"
> +
> +#include <vector>
> +
> +namespace llvm {
> +
> +namespace orc {
> +
> +// ImplSymbolMap methods
> +void ImplSymbolMap::trackImpls(SymbolAliasMap ImplMaps, JITDylib *SrcJD) {
> + assert(SrcJD && "Tracking on Null Source .impl dylib");
> + std::lock_guard<std::mutex> Lockit(ConcurrentAccess);
> + for (auto &I : ImplMaps) {
> + auto It = Maps.insert({I.first, {I.second.Aliasee, SrcJD}});
> + // TODO: check the rationale when independent dylibs have the same symbol name.
> + assert(It.second && "ImplSymbols are already tracked for this Symbol?");
> + (void)(It);
> + }
> +}
> +
> +// If two modules share the same LLVMContext, different threads must not
> +// access those modules concurrently; doing so leaves the LLVMContext in an
> +// inconsistent state. But since each TSM here has a unique Context
> +// associated with it, no locking is necessary!
> +void IRSpeculationLayer::emit(MaterializationResponsibility R,
> + ThreadSafeModule TSM) {
> +
> + assert(TSM && "Speculation Layer received Null Module ?");
> + assert(TSM.getContext().getContext() != nullptr &&
> + "Module with null LLVMContext?");
> +
> + // Instrumentation of runtime calls
> + auto &InContext = *TSM.getContext().getContext();
> + auto SpeculatorVTy = StructType::create(InContext, "Class.Speculator");
> + auto RuntimeCallTy = FunctionType::get(
> + Type::getVoidTy(InContext),
> + {SpeculatorVTy->getPointerTo(), Type::getInt64Ty(InContext)}, false);
> + auto RuntimeCall =
> + Function::Create(RuntimeCallTy, Function::LinkageTypes::ExternalLinkage,
> + "__orc_speculate_for", TSM.getModuleUnlocked());
> + auto SpeclAddr = new GlobalVariable(
> + *TSM.getModuleUnlocked(), SpeculatorVTy, false,
> + GlobalValue::LinkageTypes::ExternalLinkage, nullptr, "__orc_speculator");
> +
> + IRBuilder<> Mutator(InContext);
> +
> + // QueryAnalysis is allowed to transform the IR source; for example,
> + // SimplifyCFG helps the static branch prediction heuristics!
> + for (auto &Fn : TSM.getModuleUnlocked()->getFunctionList()) {
> + if (!Fn.isDeclaration()) {
> + auto IRNames = QueryAnalysis(Fn, FAM);
> + // Instrument and register if Query has result
> + if (IRNames.hasValue()) {
> + Mutator.SetInsertPoint(&(Fn.getEntryBlock().front()));
> + auto ImplAddrToUint =
> + Mutator.CreatePtrToInt(&Fn, Type::getInt64Ty(InContext));
> + Mutator.CreateCall(RuntimeCallTy, RuntimeCall,
> + {SpeclAddr, ImplAddrToUint});
> + S.registerSymbols(internToJITSymbols(IRNames.getValue()),
> + &R.getTargetJITDylib());
> + }
> + }
> + }
> + // No locking needed: read-only operation.
> + assert(!(verifyModule(*TSM.getModuleUnlocked())) &&
> + "Speculation Instrumentation breaks IR?");
> +
> + NextLayer.emit(std::move(R), std::move(TSM));
> +}
> +
> +// Runtime Function Implementation
> +extern "C" void __orc_speculate_for(Speculator *Ptr, uint64_t StubId) {
> + assert(Ptr && "Null Address Received in __orc_speculate_for");
> + Ptr->speculateFor(StubId);
> +}
> +
> +} // namespace orc
> +} // namespace llvm
>
>