[llvm] 7e7021c - [mlgo] Update FunctionPropertyCache after invalidating analyses

Mircea Trofin via llvm-commits llvm-commits at lists.llvm.org
Fri Jun 10 16:18:23 PDT 2022


Author: Mircea Trofin
Date: 2022-06-10T16:18:14-07:00
New Revision: 7e7021ca1a8f8b7621832c84745793206573b301

URL: https://github.com/llvm/llvm-project/commit/7e7021ca1a8f8b7621832c84745793206573b301
DIFF: https://github.com/llvm/llvm-project/commit/7e7021ca1a8f8b7621832c84745793206573b301.diff

LOG: [mlgo] Update FunctionPropertyCache after invalidating analyses

The update depends on LoopInfo, so we need that refreshed first, not
after.

Differential Revision: https://reviews.llvm.org/D127467

Added: 
    llvm/test/Transforms/Inline/ML/fpi-update.ll

Modified: 
    llvm/include/llvm/Analysis/FunctionPropertiesAnalysis.h
    llvm/include/llvm/Analysis/MLInlineAdvisor.h
    llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp
    llvm/lib/Analysis/MLInlineAdvisor.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/FunctionPropertiesAnalysis.h b/llvm/include/llvm/Analysis/FunctionPropertiesAnalysis.h
index 1eb02de59b717..111ecbb0f3085 100644
--- a/llvm/include/llvm/Analysis/FunctionPropertiesAnalysis.h
+++ b/llvm/include/llvm/Analysis/FunctionPropertiesAnalysis.h
@@ -111,7 +111,7 @@ class FunctionPropertiesUpdater {
 public:
   FunctionPropertiesUpdater(FunctionPropertiesInfo &FPI, const CallBase &CB);
 
-  void finish(const LoopInfo &LI);
+  void finish(const LoopInfo &LI) const;
 
 private:
   FunctionPropertiesInfo &FPI;

diff --git a/llvm/include/llvm/Analysis/MLInlineAdvisor.h b/llvm/include/llvm/Analysis/MLInlineAdvisor.h
index 0b416ee57a94e..ce83432db506e 100644
--- a/llvm/include/llvm/Analysis/MLInlineAdvisor.h
+++ b/llvm/include/llvm/Analysis/MLInlineAdvisor.h
@@ -44,7 +44,6 @@ class MLInlineAdvisor : public InlineAdvisor {
   int64_t getLocalCalls(Function &F);
   const MLModelRunner &getModelRunner() const { return *ModelRunner.get(); }
   FunctionPropertiesInfo &getCachedFPI(Function &) const;
-  const LoopInfo &getLoopInfo(Function &F) const;
 
 protected:
   std::unique_ptr<InlineAdvice> getAdviceImpl(CallBase &CB) override;
@@ -67,10 +66,7 @@ class MLInlineAdvisor : public InlineAdvisor {
 private:
   int64_t getModuleIRSize() const;
 
-  void print(raw_ostream &OS) const override {
-    OS << "[MLInlineAdvisor] Nodes: " << NodeCount << " Edges: " << EdgeCount
-       << "\n";
-  }
+  void print(raw_ostream &OS) const override;
 
   mutable DenseMap<const Function *, FunctionPropertiesInfo> FPICache;
 
@@ -107,10 +103,10 @@ class MLInlineAdvice : public InlineAdvice {
   const int64_t CallerIRSize;
   const int64_t CalleeIRSize;
   const int64_t CallerAndCalleeEdges;
+  void updateCachedCallerFPI(const LoopInfo &LI) const;
 
 private:
   void reportContextForRemark(DiagnosticInfoOptimizationBase &OR);
-  void updateCachedCallerFPI();
   MLInlineAdvisor *getAdvisor() const {
     return static_cast<MLInlineAdvisor *>(Advisor);
   };

diff --git a/llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp b/llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp
index 1bac19a9eb20c..41975561fe65a 100644
--- a/llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp
+++ b/llvm/lib/Analysis/FunctionPropertiesAnalysis.cpp
@@ -153,7 +153,7 @@ FunctionPropertiesUpdater::FunctionPropertiesUpdater(
     FPI.updateForBB(*BB, -1);
 }
 
-void FunctionPropertiesUpdater::finish(const LoopInfo &LI) {
+void FunctionPropertiesUpdater::finish(const LoopInfo &LI) const {
   DenseSet<const BasicBlock *> ReIncluded;
   std::deque<const BasicBlock *> Worklist;
 

diff --git a/llvm/lib/Analysis/MLInlineAdvisor.cpp b/llvm/lib/Analysis/MLInlineAdvisor.cpp
index 2c660f3eccdd0..c46946cac0af3 100644
--- a/llvm/lib/Analysis/MLInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/MLInlineAdvisor.cpp
@@ -51,6 +51,12 @@ static cl::opt<float> SizeIncreaseThreshold(
              "blocking any further inlining."),
     cl::init(2.0));
 
+static cl::opt<bool> KeepFPICache(
+    "ml-advisor-keep-fpi-cache", cl::Hidden,
+    cl::desc(
+        "For test - keep the ML Inline advisor's FunctionPropertiesInfo cache"),
+    cl::init(false));
+
 // clang-format off
 const std::array<TensorSpec, NumberOfFeatures> llvm::FeatureMap{
 #define POPULATE_NAMES(_, NAME) TensorSpec::createSpec<int64_t>(NAME, {1} ),
@@ -175,7 +181,8 @@ void MLInlineAdvisor::onPassEntry() {
 
 void MLInlineAdvisor::onPassExit(LazyCallGraph::SCC *LastSCC) {
   // No need to keep this around - function passes will invalidate it.
-  FPICache.clear();
+  if (!KeepFPICache)
+    FPICache.clear();
   if (!LastSCC || ForceStop)
     return;
   // Keep track of the nodes and edges we last saw. Then, in onPassEntry,
@@ -206,13 +213,12 @@ void MLInlineAdvisor::onSuccessfulInlining(const MLInlineAdvice &Advice,
   assert(!ForceStop);
   Function *Caller = Advice.getCaller();
   Function *Callee = Advice.getCallee();
-
   // The caller features aren't valid anymore.
   {
-    PreservedAnalyses PA = PreservedAnalyses::all();
-    PA.abandon<FunctionPropertiesAnalysis>();
+    PreservedAnalyses PA = PreservedAnalyses::none();
     FAM.invalidate(*Caller, PA);
   }
+  Advice.updateCachedCallerFPI(FAM.getResult<LoopAnalysis>(*Caller));
   int64_t IRSizeAfter =
       getIRSize(*Caller) + (CalleeWasDeleted ? 0 : Advice.CalleeIRSize);
   CurrentIRSize += IRSizeAfter - (Advice.CallerIRSize + Advice.CalleeIRSize);
@@ -366,15 +372,23 @@ std::unique_ptr<InlineAdvice> MLInlineAdvisor::getMandatoryAdvice(CallBase &CB,
   return std::make_unique<InlineAdvice>(this, CB, getCallerORE(CB), Advice);
 }
 
-const LoopInfo &MLInlineAdvisor::getLoopInfo(Function &F) const {
-  return FAM.getResult<LoopAnalysis>(F);
-}
-
 std::unique_ptr<MLInlineAdvice>
 MLInlineAdvisor::getMandatoryAdviceImpl(CallBase &CB) {
   return std::make_unique<MLInlineAdvice>(this, CB, getCallerORE(CB), true);
 }
 
+void MLInlineAdvisor::print(raw_ostream &OS) const {
+  OS << "[MLInlineAdvisor] Nodes: " << NodeCount << " Edges: " << EdgeCount
+     << "\n";
+  OS << "[MLInlineAdvisor] FPI:\n";
+  for (auto I : FPICache) {
+    OS << I.getFirst()->getName() << ":\n";
+    I.getSecond().print(OS);
+    OS << "\n";
+  }
+  OS << "\n";
+}
+
 MLInlineAdvice::MLInlineAdvice(MLInlineAdvisor *Advisor, CallBase &CB,
                                OptimizationRemarkEmitter &ORE,
                                bool Recommendation)
@@ -400,12 +414,11 @@ void MLInlineAdvice::reportContextForRemark(
   OR << NV("ShouldInline", isInliningRecommended());
 }
 
-void MLInlineAdvice::updateCachedCallerFPI() {
-  FPU->finish(getAdvisor()->getLoopInfo(*Caller));
+void MLInlineAdvice::updateCachedCallerFPI(const LoopInfo &LI) const {
+  FPU->finish(LI);
 }
 
 void MLInlineAdvice::recordInliningImpl() {
-  updateCachedCallerFPI();
   ORE.emit([&]() {
     OptimizationRemark R(DEBUG_TYPE, "InliningSuccess", DLoc, Block);
     reportContextForRemark(R);
@@ -415,7 +428,6 @@ void MLInlineAdvice::recordInliningImpl() {
 }
 
 void MLInlineAdvice::recordInliningWithCalleeDeletedImpl() {
-  updateCachedCallerFPI();
   ORE.emit([&]() {
     OptimizationRemark R(DEBUG_TYPE, "InliningSuccessWithCalleeDeleted", DLoc,
                          Block);

diff --git a/llvm/test/Transforms/Inline/ML/fpi-update.ll b/llvm/test/Transforms/Inline/ML/fpi-update.ll
new file mode 100644
index 0000000000000..9208ad70b9a35
--- /dev/null
+++ b/llvm/test/Transforms/Inline/ML/fpi-update.ll
@@ -0,0 +1,32 @@
+target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-grtev4-linux-gnu"
+
+; TODO: we could instantiate the MLInliner with a non-model generated evaluator
+; and drop the requirement
+; REQUIRES: llvm_inliner_model_autogenerated
+
+; RUN: opt -enable-ml-inliner=release -passes='scc-oz-module-inliner,print<inline-advisor>' \
+; RUN:     -keep-inline-advisor-for-printing -max-devirt-iterations=0 \
+; RUN:     -mandatory-inlining-first=0 -S < %s 2>&1 | FileCheck %s
+
+define void @caller(i32 %i) #1 {
+  call void @callee(i32 %i)
+  ret void
+}
+
+define void @callee(i32 %i) #0 {
+entry:
+  br label %loop
+loop:
+  %cond = icmp slt i32 %i, 0
+  br i1 %cond, label %loop, label %exit
+exit:
+  ret void
+}
+
+attributes #0 = { alwaysinline }
+attributes #1 = { noinline optnone }
+
+; CHECK: [MLInlineAdvisor] FPI:
+; CHECK: caller:
+; CHECK: MaxLoopDepth: 1


        


More information about the llvm-commits mailing list