[llvm] a120fdd - [NFC][MLGO] Add RTTI support for MLModelRunner and simplify runner setup

Mircea Trofin via llvm-commits llvm-commits at lists.llvm.org
Tue Jan 4 19:46:28 PST 2022


Author: Mircea Trofin
Date: 2022-01-04T19:46:14-08:00
New Revision: a120fdd337fc9b10d478aabfdfb2be0395d9e42f

URL: https://github.com/llvm/llvm-project/commit/a120fdd337fc9b10d478aabfdfb2be0395d9e42f
DIFF: https://github.com/llvm/llvm-project/commit/a120fdd337fc9b10d478aabfdfb2be0395d9e42f.diff

LOG: [NFC][MLGO] Add RTTI support for MLModelRunner and simplify runner setup
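
This change gives MLModelRunner LLVM-style RTTI: each runner reports a fixed
Kind (Release, Development, NoOp) and implements classof(), so clients can use
isa<> / dyn_cast<> instead of threading an IsDoingInference flag and a raw
ModelUnderTrainingRunner* through the advisor setup. ModelUnderTrainingRunner's
constructor also becomes private; callers go through the new
createAndEnsureValid() factory, which emits an error on the LLVMContext and
returns nullptr if the model or its output specs cannot be loaded.

A minimal sketch of the resulting usage pattern, assuming a build with
LLVM_HAVE_TF_API set (the isDoingInference and setupLogger helpers below are
illustrative, not part of this patch):

    #include "llvm/Analysis/MLModelRunner.h"
    #include "llvm/Analysis/ModelUnderTrainingRunner.h"
    #include "llvm/Support/Casting.h"

    // classof() compares the Kind stored at construction time, so isa<>
    // resolves without language-level RTTI (no dynamic_cast).
    static bool isDoingInference(const llvm::MLModelRunner &R) {
      return llvm::isa<llvm::ModelUnderTrainingRunner>(R);
    }

    // Runner must be non-null here; dyn_cast<> then yields nullptr for the
    // NoOp and Release runners, which is how the TrainingLogger now receives
    // its (possibly null) ModelUnderTrainingRunner.
    static void setupLogger(llvm::MLModelRunner *Runner) {
      if (auto *MUTR = llvm::dyn_cast<llvm::ModelUnderTrainingRunner>(Runner))
        (void)MUTR->outputLoggedFeatureSpecs();
    }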

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/MLModelRunner.h
    llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
    llvm/include/llvm/Analysis/NoInferenceModelRunner.h
    llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
    llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
    llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
    llvm/lib/Analysis/NoInferenceModelRunner.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/MLModelRunner.h b/llvm/include/llvm/Analysis/MLModelRunner.h
index 90b3cc7e76e65..3a6fa99347fb5 100644
--- a/llvm/include/llvm/Analysis/MLModelRunner.h
+++ b/llvm/include/llvm/Analysis/MLModelRunner.h
@@ -41,8 +41,13 @@ class MLModelRunner {
         getTensorUntyped(static_cast<size_t>(FeatureID)));
   }
 
+  enum class Kind : int { Unknown, Release, Development, NoOp };
+  Kind getKind() const { return Type; }
+
 protected:
-  MLModelRunner(LLVMContext &Ctx) : Ctx(Ctx) {}
+  MLModelRunner(LLVMContext &Ctx, Kind Type) : Ctx(Ctx), Type(Type) {
+    assert(Type != Kind::Unknown);
+  }
   virtual void *evaluateUntyped() = 0;
   virtual void *getTensorUntyped(size_t Index) = 0;
   const void *getTensorUntyped(size_t Index) const {
@@ -50,6 +55,7 @@ class MLModelRunner {
   }
 
   LLVMContext &Ctx;
+  const Kind Type;
 };
 } // namespace llvm
 

diff --git a/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
index ca99d5d01eefa..071ccf96fe5b0 100644
--- a/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
+++ b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
@@ -26,17 +26,11 @@ namespace llvm {
 /// sacrificed for ease of use while training.
 class ModelUnderTrainingRunner final : public MLModelRunner {
 public:
-  ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath,
-                           const std::vector<TensorSpec> &InputSpecs,
-                           const std::vector<LoggedFeatureSpec> &OutputSpecs);
-
   // Disallows copy and assign.
   ModelUnderTrainingRunner(const ModelUnderTrainingRunner &) = delete;
   ModelUnderTrainingRunner &
   operator=(const ModelUnderTrainingRunner &) = delete;
 
-  bool isValid() const { return !!Evaluator; }
-
   const std::vector<LoggedFeatureSpec> &outputLoggedFeatureSpecs() const {
     return OutputSpecs;
   }
@@ -45,13 +39,27 @@ class ModelUnderTrainingRunner final : public MLModelRunner {
   lastEvaluationResult() const {
     return LastEvaluationResult;
   }
+  static bool classof(const MLModelRunner *R) {
+    return R->getKind() == MLModelRunner::Kind::Development;
+  }
+
+  static std::unique_ptr<ModelUnderTrainingRunner>
+  createAndEnsureValid(LLVMContext &Ctx, const std::string &ModelPath,
+                       StringRef DecisionName,
+                       const std::vector<TensorSpec> &InputSpecs,
+                       StringRef OutputSpecsPathOverride = "");
 
 private:
+  ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath,
+                           const std::vector<TensorSpec> &InputSpecs,
+                           const std::vector<LoggedFeatureSpec> &OutputSpecs);
+
   std::unique_ptr<TFModelEvaluator> Evaluator;
   const std::vector<LoggedFeatureSpec> OutputSpecs;
   Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
   void *evaluateUntyped() override;
   void *getTensorUntyped(size_t Index) override;
+  bool isValid() const { return !!Evaluator; }
 };
 
 } // namespace llvm

diff --git a/llvm/include/llvm/Analysis/NoInferenceModelRunner.h b/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
index 60d6777c765b8..1e8cb26cc3c14 100644
--- a/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
+++ b/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
@@ -26,6 +26,10 @@ class NoInferenceModelRunner : public MLModelRunner {
   NoInferenceModelRunner(LLVMContext &Ctx,
                          const std::vector<TensorSpec> &Inputs);
 
+  static bool classof(const MLModelRunner *R) {
+    return R->getKind() == MLModelRunner::Kind::NoOp;
+  }
+
 private:
   void *evaluateUntyped() override {
     llvm_unreachable("We shouldn't call run on this model runner.");

diff --git a/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h b/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
index b684f87ea5cba..bb19d2c7d9260 100644
--- a/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
+++ b/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
@@ -29,7 +29,8 @@ class ReleaseModeModelRunner final : public MLModelRunner {
   ReleaseModeModelRunner(LLVMContext &Ctx, const FType &FeatureNames,
                          StringRef DecisionName, StringRef FeedPrefix = "feed_",
                          StringRef FetchPrefix = "fetch_")
-      : MLModelRunner(Ctx), CompiledModel(std::make_unique<TGen>()) {
+      : MLModelRunner(Ctx, MLModelRunner::Kind::Release),
+        CompiledModel(std::make_unique<TGen>()) {
     assert(CompiledModel && "The CompiledModel should be valid");
 
     const size_t FeatureCount = FeatureNames.size();
@@ -49,6 +50,10 @@ class ReleaseModeModelRunner final : public MLModelRunner {
 
   virtual ~ReleaseModeModelRunner() = default;
 
+  static bool classof(const MLModelRunner *R) {
+    return R->getKind() == MLModelRunner::Kind::Release;
+  }
+
 private:
   void *evaluateUntyped() override {
     CompiledModel->Run();

diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
index 31b2dafa29b40..26bdbda56ba66 100644
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -11,6 +11,7 @@
 //
 //===----------------------------------------------------------------------===//
 #include "llvm/Config/config.h"
+#include "llvm/Support/Casting.h"
 #if defined(LLVM_HAVE_TF_API)
 
 #include "llvm/Analysis/CallGraph.h"
@@ -150,7 +151,7 @@ class DevelopmentModeMLInlineAdvisor : public MLInlineAdvisor {
   DevelopmentModeMLInlineAdvisor(
       Module &M, ModuleAnalysisManager &MAM,
       std::unique_ptr<MLModelRunner> ModelRunner,
-      std::function<bool(CallBase &)> GetDefaultAdvice, bool IsDoingInference,
+      std::function<bool(CallBase &)> GetDefaultAdvice,
       std::unique_ptr<TrainingLogger> Logger);
 
   size_t getTotalSizeEstimate();
@@ -341,10 +342,11 @@ void TrainingLogger::print() {
 DevelopmentModeMLInlineAdvisor::DevelopmentModeMLInlineAdvisor(
     Module &M, ModuleAnalysisManager &MAM,
     std::unique_ptr<MLModelRunner> ModelRunner,
-    std::function<bool(CallBase &)> GetDefaultAdvice, bool IsDoingInference,
+    std::function<bool(CallBase &)> GetDefaultAdvice,
     std::unique_ptr<TrainingLogger> Logger)
     : MLInlineAdvisor(M, MAM, std::move(ModelRunner)),
-      GetDefaultAdvice(GetDefaultAdvice), IsDoingInference(IsDoingInference),
+      GetDefaultAdvice(GetDefaultAdvice),
+      IsDoingInference(isa<ModelUnderTrainingRunner>(getModelRunner())),
       Logger(std::move(Logger)),
       InitialNativeSize(isLogging() ? getTotalSizeEstimate() : 0),
       CurrentNativeSize(InitialNativeSize) {
@@ -422,30 +424,20 @@ std::unique_ptr<InlineAdvisor> llvm::getDevelopmentModeAdvisor(
     std::function<bool(CallBase &)> GetDefaultAdvice) {
   auto &Ctx = M.getContext();
   std::unique_ptr<MLModelRunner> Runner;
-  ModelUnderTrainingRunner *MUTRPtr = nullptr;
-  bool IsDoingInference = false;
   if (TFModelUnderTrainingPath.empty())
     Runner.reset(new NoInferenceModelRunner(Ctx, getInputFeatures()));
-  else {
-    std::unique_ptr<ModelUnderTrainingRunner> MUTR;
-    if (auto MaybeOutputSpecs = loadOutputSpecs(
-            Ctx, DecisionName, TFModelUnderTrainingPath, TFOutputSpecOverride))
-      MUTR = std::make_unique<ModelUnderTrainingRunner>(
-          Ctx, TFModelUnderTrainingPath, getInputFeatures(), *MaybeOutputSpecs);
-    if (!MUTR || !MUTR->isValid()) {
-      Ctx.emitError("Could not load the policy model from the provided path");
-      return nullptr;
-    }
-    IsDoingInference = true;
-    MUTRPtr = MUTR.get();
-    Runner = std::move(MUTR);
-  }
+  else
+    Runner = ModelUnderTrainingRunner::createAndEnsureValid(
+        Ctx, TFModelUnderTrainingPath, DecisionName, getInputFeatures(),
+        TFOutputSpecOverride);
+  if (!Runner)
+    return nullptr;
   std::unique_ptr<TrainingLogger> Logger;
   if (!TrainingLog.empty())
-    Logger = std::make_unique<TrainingLogger>(TrainingLog, MUTRPtr);
+    Logger = std::make_unique<TrainingLogger>(
+        TrainingLog, dyn_cast<ModelUnderTrainingRunner>(Runner.get()));
 
   return std::make_unique<DevelopmentModeMLInlineAdvisor>(
-      M, MAM, std::move(Runner), GetDefaultAdvice, IsDoingInference,
-      std::move(Logger));
+      M, MAM, std::move(Runner), GetDefaultAdvice, std::move(Logger));
 }
 #endif // defined(LLVM_HAVE_TF_API)

diff --git a/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
index 941458f648bcc..c1385fc2ddc2d 100644
--- a/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
+++ b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
@@ -22,7 +22,8 @@ ModelUnderTrainingRunner::ModelUnderTrainingRunner(
     LLVMContext &Ctx, const std::string &ModelPath,
     const std::vector<TensorSpec> &InputSpecs,
     const std::vector<LoggedFeatureSpec> &OutputSpecs)
-    : MLModelRunner(Ctx), OutputSpecs(OutputSpecs) {
+    : MLModelRunner(Ctx, MLModelRunner::Kind::Development),
+      OutputSpecs(OutputSpecs) {
   Evaluator = std::make_unique<TFModelEvaluator>(
       ModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I].Spec; },
       OutputSpecs.size());
@@ -46,4 +47,21 @@ void *ModelUnderTrainingRunner::getTensorUntyped(size_t Index) {
   return Evaluator->getUntypedInput(Index);
 }
 
+std::unique_ptr<ModelUnderTrainingRunner>
+ModelUnderTrainingRunner::createAndEnsureValid(
+    LLVMContext &Ctx, const std::string &ModelPath, StringRef DecisionName,
+    const std::vector<TensorSpec> &InputSpecs,
+    StringRef OutputSpecsPathOverride) {
+  std::unique_ptr<ModelUnderTrainingRunner> MUTR;
+  if (auto MaybeOutputSpecs = loadOutputSpecs(Ctx, DecisionName, ModelPath,
+                                              OutputSpecsPathOverride))
+    MUTR.reset(new ModelUnderTrainingRunner(Ctx, ModelPath, InputSpecs,
+                                            *MaybeOutputSpecs));
+  if (MUTR && MUTR->isValid())
+    return MUTR;
+
+  Ctx.emitError("Could not load the policy model from the provided path");
+  return nullptr;
+}
+
 #endif // defined(LLVM_HAVE_TF_API)

diff --git a/llvm/lib/Analysis/NoInferenceModelRunner.cpp b/llvm/lib/Analysis/NoInferenceModelRunner.cpp
index 02ece6aa39006..7178120ebe4fc 100644
--- a/llvm/lib/Analysis/NoInferenceModelRunner.cpp
+++ b/llvm/lib/Analysis/NoInferenceModelRunner.cpp
@@ -20,7 +20,7 @@ using namespace llvm;
 
 NoInferenceModelRunner::NoInferenceModelRunner(
     LLVMContext &Ctx, const std::vector<TensorSpec> &Inputs)
-    : MLModelRunner(Ctx) {
+    : MLModelRunner(Ctx, MLModelRunner::Kind::NoOp) {
   ValuesBuffer.reserve(Inputs.size());
   for (const auto &TS : Inputs)
     ValuesBuffer.push_back(std::make_unique<char[]>(TS.getElementCount() *

