[llvm] 04f2712 - [NFC][MLGO] Factor ModelUnderTrainingRunner for reuse

Mircea Trofin via llvm-commits llvm-commits at lists.llvm.org
Fri Dec 10 11:24:26 PST 2021


Author: Mircea Trofin
Date: 2021-12-10T11:24:15-08:00
New Revision: 04f2712ef4e4d154f8b4d94db8020a901dbf22ff

URL: https://github.com/llvm/llvm-project/commit/04f2712ef4e4d154f8b4d94db8020a901dbf22ff
DIFF: https://github.com/llvm/llvm-project/commit/04f2712ef4e4d154f8b4d94db8020a901dbf22ff.diff

LOG: [NFC][MLGO] Factor ModelUnderTrainingRunner for reuse

This is so we may reuse it. It was very non-inliner specific already.

Differential Revision: https://reviews.llvm.org/D115465

Added: 
    llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
    llvm/lib/Analysis/ModelUnderTrainingRunner.cpp

Modified: 
    llvm/lib/Analysis/CMakeLists.txt
    llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
new file mode 100644
index 0000000000000..ca99d5d01eefa
--- /dev/null
+++ b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
@@ -0,0 +1,59 @@
+//===- ModelUnderTrainingRunner.h -- 'development' mode runner --*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+
+#ifndef LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
+#define LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
+
+#include "llvm/Config/llvm-config.h"
+
+#ifdef LLVM_HAVE_TF_API
+#include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Analysis/Utils/TFUtils.h"
+#include "llvm/IR/LLVMContext.h"
+#include "llvm/IR/PassManager.h"
+
+namespace llvm {
+
+/// ModelUnderTrainingRunner - training mode implementation. It uses TF C APIs
+/// to dynamically load and evaluate a TF SavedModel
+/// (https://www.tensorflow.org/guide/saved_model). Runtime performance is
+/// sacrificed for ease of use while training.
+class ModelUnderTrainingRunner final : public MLModelRunner {
+public:
+  ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath,
+                           const std::vector<TensorSpec> &InputSpecs,
+                           const std::vector<LoggedFeatureSpec> &OutputSpecs);
+
+  // Disallows copy and assign.
+  ModelUnderTrainingRunner(const ModelUnderTrainingRunner &) = delete;
+  ModelUnderTrainingRunner &
+  operator=(const ModelUnderTrainingRunner &) = delete;
+
+  bool isValid() const { return !!Evaluator; }
+
+  const std::vector<LoggedFeatureSpec> &outputLoggedFeatureSpecs() const {
+    return OutputSpecs;
+  }
+
+  const Optional<TFModelEvaluator::EvaluationResult> &
+  lastEvaluationResult() const {
+    return LastEvaluationResult;
+  }
+
+private:
+  std::unique_ptr<TFModelEvaluator> Evaluator;
+  const std::vector<LoggedFeatureSpec> OutputSpecs;
+  Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
+  void *evaluateUntyped() override;
+  void *getTensorUntyped(size_t Index) override;
+};
+
+} // namespace llvm
+#endif // defined(LLVM_HAVE_TF_API)
+#endif // LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H

diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
index 34ca125068b19..ba5735061897b 100644
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -103,6 +103,7 @@ add_llvm_component_library(LLVMAnalysis
   MemoryLocation.cpp
   MemorySSA.cpp
   MemorySSAUpdater.cpp
+  ModelUnderTrainingRunner.cpp
   ModuleDebugInfoPrinter.cpp
   ModuleSummaryAnalysis.cpp
   MustExecute.cpp

diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
index b76d63dddeb01..31b2dafa29b40 100644
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -16,6 +16,7 @@
 #include "llvm/Analysis/CallGraph.h"
 #include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"
 #include "llvm/Analysis/MLInlineAdvisor.h"
+#include "llvm/Analysis/ModelUnderTrainingRunner.h"
 #include "llvm/Analysis/NoInferenceModelRunner.h"
 #include "llvm/Analysis/Utils/TFUtils.h"
 #include "llvm/IR/LLVMContext.h"
@@ -95,7 +96,6 @@ struct InlineEvent {
 /// Because this is a protobuf, we cannot just stream the events as they come.
 /// Internally, TrainingLogger stores data in column-major format, because that
 /// lines up with how TF SequenceExample represents it.
-class ModelUnderTrainingRunner;
 class TrainingLogger final {
 public:
   TrainingLogger(StringRef LogFileName, const ModelUnderTrainingRunner *MUTR);
@@ -262,55 +262,21 @@ class LoggingMLInlineAdvice : public MLInlineAdvice {
   const int64_t Mandatory;
 };
 
-/// ModelUnderTrainingRunner - training mode implementation. It uses TF C APIs
-/// to dynamically load and evaluate a TF SavedModel
-/// (https://www.tensorflow.org/guide/saved_model). Runtime performance is
-/// sacrificed for ease of use while training.
-class ModelUnderTrainingRunner final : public MLModelRunner {
-public:
-  ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath);
-
-  // Disallows copy and assign.
-  ModelUnderTrainingRunner(const ModelUnderTrainingRunner &) = delete;
-  ModelUnderTrainingRunner &
-  operator=(const ModelUnderTrainingRunner &) = delete;
-
-  bool isValid() const { return !!Evaluator; }
-
-  const std::vector<LoggedFeatureSpec> &outputLoggedFeatureSpecs() const {
-    return OutputSpecs;
-  }
-
-  const Optional<TFModelEvaluator::EvaluationResult> &
-  lastEvaluationResult() const {
-    return LastEvaluationResult;
-  }
-
-  static const std::vector<TensorSpec> getInputFeatures() {
-    std::vector<TensorSpec> InputSpecs;
-    for (size_t I = 0; I < NumberOfFeatures; ++I)
-      InputSpecs.push_back(TensorSpec::createSpec<int64_t>(
-          TFFeedPrefix + FeatureNameMap[I], {1}));
-    append_range(InputSpecs, TrainingOnlyFeatures);
-    return InputSpecs;
-  }
-
-private:
-  std::unique_ptr<TFModelEvaluator> Evaluator;
-  std::vector<LoggedFeatureSpec> OutputSpecs;
-  Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
-  void *evaluateUntyped() override;
-  void *getTensorUntyped(size_t Index) override;
-
-  // The training framework needs some additional features.
-  const static std::vector<TensorSpec> TrainingOnlyFeatures;
-};
-
-const std::vector<TensorSpec> ModelUnderTrainingRunner::TrainingOnlyFeatures{
+static const std::vector<TensorSpec> TrainingOnlyFeatures{
     TensorSpec::createSpec<int64_t>(TFFeedPrefix + "inlining_default", {1}),
     TensorSpec::createSpec<float>(TFFeedPrefix + "discount", {1}),
     TensorSpec::createSpec<float>(TFFeedPrefix + "reward", {1}),
     TensorSpec::createSpec<int32_t>(TFFeedPrefix + "step_type", {1})};
+
+static const std::vector<TensorSpec> getInputFeatures() {
+  std::vector<TensorSpec> InputSpecs;
+  for (size_t I = 0; I < NumberOfFeatures; ++I)
+    InputSpecs.push_back(
+        TensorSpec::createSpec<int64_t>(TFFeedPrefix + FeatureNameMap[I], {1}));
+  append_range(InputSpecs, TrainingOnlyFeatures);
+  return InputSpecs;
+}
+
 } // namespace
 
 TrainingLogger::TrainingLogger(StringRef LogFileName,
@@ -451,40 +417,6 @@ size_t DevelopmentModeMLInlineAdvisor::getTotalSizeEstimate() {
   return Ret;
 }
 
-ModelUnderTrainingRunner::ModelUnderTrainingRunner(LLVMContext &Ctx,
-                                                   const std::string &ModelPath)
-    : MLModelRunner(Ctx) {
-  std::vector<TensorSpec> InputSpecs =
-      ModelUnderTrainingRunner::getInputFeatures();
-  if (auto MaybeOutSpecs =
-          loadOutputSpecs(Ctx, DecisionName, ModelPath, TFOutputSpecOverride))
-    OutputSpecs = std::move(*MaybeOutSpecs);
-  else
-    return;
-
-  Evaluator = std::make_unique<TFModelEvaluator>(
-      ModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I].Spec; },
-      OutputSpecs.size());
-  if (!Evaluator || !Evaluator->isValid()) {
-    Ctx.emitError("Failed to create inliner saved model evaluator");
-    Evaluator.reset();
-    return;
-  }
-}
-
-void *ModelUnderTrainingRunner::evaluateUntyped() {
-  LastEvaluationResult = Evaluator->evaluate();
-  if (!LastEvaluationResult.hasValue()) {
-    Ctx.emitError("Error evaluating model.");
-    return nullptr;
-  }
-  return LastEvaluationResult->getTensorValue<int64_t>(0);
-}
-
-void *ModelUnderTrainingRunner::getTensorUntyped(size_t Index) {
-  return Evaluator->getUntypedInput(Index);
-}
-
 std::unique_ptr<InlineAdvisor> llvm::getDevelopmentModeAdvisor(
     Module &M, ModuleAnalysisManager &MAM,
     std::function<bool(CallBase &)> GetDefaultAdvice) {
@@ -493,11 +425,13 @@ std::unique_ptr<InlineAdvisor> llvm::getDevelopmentModeAdvisor(
   ModelUnderTrainingRunner *MUTRPtr = nullptr;
   bool IsDoingInference = false;
   if (TFModelUnderTrainingPath.empty())
-    Runner.reset(new NoInferenceModelRunner(
-        Ctx, ModelUnderTrainingRunner::getInputFeatures()));
+    Runner.reset(new NoInferenceModelRunner(Ctx, getInputFeatures()));
   else {
-    auto MUTR = std::make_unique<ModelUnderTrainingRunner>(
-        Ctx, TFModelUnderTrainingPath);
+    std::unique_ptr<ModelUnderTrainingRunner> MUTR;
+    if (auto MaybeOutputSpecs = loadOutputSpecs(
+            Ctx, DecisionName, TFModelUnderTrainingPath, TFOutputSpecOverride))
+      MUTR = std::make_unique<ModelUnderTrainingRunner>(
+          Ctx, TFModelUnderTrainingPath, getInputFeatures(), *MaybeOutputSpecs);
     if (!MUTR || !MUTR->isValid()) {
       Ctx.emitError("Could not load the policy model from the provided path");
       return nullptr;

diff --git a/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
new file mode 100644
index 0000000000000..941458f648bcc
--- /dev/null
+++ b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
@@ -0,0 +1,49 @@
+//===- ModelUnderTrainingRunner.cpp - 'development' mode runner -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of a MLModelRunner for 'development' mode, i.e. evaluation
+// happens off a model that's provided from the command line and is interpreted.
+//
+//===----------------------------------------------------------------------===//
+
+#include "llvm/Config/config.h"
+#if defined(LLVM_HAVE_TF_API)
+
+#include "llvm/Analysis/ModelUnderTrainingRunner.h"
+
+using namespace llvm;
+
+ModelUnderTrainingRunner::ModelUnderTrainingRunner(
+    LLVMContext &Ctx, const std::string &ModelPath,
+    const std::vector<TensorSpec> &InputSpecs,
+    const std::vector<LoggedFeatureSpec> &OutputSpecs)
+    : MLModelRunner(Ctx), OutputSpecs(OutputSpecs) {
+  Evaluator = std::make_unique<TFModelEvaluator>(
+      ModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I].Spec; },
+      OutputSpecs.size());
+  if (!Evaluator || !Evaluator->isValid()) {
+    Ctx.emitError("Failed to create inliner saved model evaluator");
+    Evaluator.reset();
+    return;
+  }
+}
+
+void *ModelUnderTrainingRunner::evaluateUntyped() {
+  LastEvaluationResult = Evaluator->evaluate();
+  if (!LastEvaluationResult.hasValue()) {
+    Ctx.emitError("Error evaluating model.");
+    return nullptr;
+  }
+  return LastEvaluationResult->getUntypedTensorValue(0);
+}
+
+void *ModelUnderTrainingRunner::getTensorUntyped(size_t Index) {
+  return Evaluator->getUntypedInput(Index);
+}
+
+#endif // defined(LLVM_HAVE_TF_API)


        


More information about the llvm-commits mailing list