[llvm] c35ad9e - [mlgo] Support exposing more features than those supported by models

Mircea Trofin via llvm-commits llvm-commits at lists.llvm.org
Mon May 9 18:01:32 PDT 2022


Author: Mircea Trofin
Date: 2022-05-09T18:01:21-07:00
New Revision: c35ad9ee4f21c03baaea65e2479e9d08c4b4acd2

URL: https://github.com/llvm/llvm-project/commit/c35ad9ee4f21c03baaea65e2479e9d08c4b4acd2
DIFF: https://github.com/llvm/llvm-project/commit/c35ad9ee4f21c03baaea65e2479e9d08c4b4acd2.diff

LOG: [mlgo] Support exposing more features than those supported by models

This allows the compiler to support more features than those supported by
a model. The only requirement (development mode only) is that the new
features must be appended at the end of the list of features requested
from the model. The support is transparent to compiler code: for
unsupported features, we provide a valid buffer into which to copy their
values; it's just that this buffer is disconnected from the model, so as
far as the model is concerned (AOT or development mode), these features
don't exist. The buffers are allocated at setup - meaning, at steady
state, there is no extra allocation (maintaining the current invariant).
These buffers have two roles: first, they keep the compiler code simple;
second, they allow logging their values in development mode. The latter
makes it possible to retrain a model supporting the larger feature set
starting from traces produced with the old model.
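
To illustrate the mechanism, here is a minimal standalone sketch (not
the patch itself; simplified from the MLModelRunner changes below): if
the model supplies no buffer for a feature, the runner allocates one of
the right size at setup and hands that out instead.

  // Sketch of the fallback-buffer scheme, simplified from this patch's
  // MLModelRunner; Spec stands in for llvm::TensorSpec.
  #include <cstddef>
  #include <vector>

  struct Spec {
    size_t ElementCount;
    size_t ElementSize;
    size_t getTotalTensorBufferSize() const {
      return ElementCount * ElementSize;
    }
  };

  class RunnerSketch {
    std::vector<void *> InputBuffers;            // one slot per feature
    std::vector<std::vector<char>> OwnedBuffers; // backing for extras

  public:
    explicit RunnerSketch(size_t NrInputs) : InputBuffers(NrInputs) {}

    // Use the model's buffer when it has one; otherwise allocate at
    // setup so the compiler can still write (and log) the value.
    void setUpBufferForTensor(size_t Index, const Spec &S, void *Buffer) {
      if (!Buffer) {
        OwnedBuffers.emplace_back(S.getTotalTensorBufferSize());
        Buffer = OwnedBuffers.back().data();
      }
      InputBuffers[Index] = Buffer;
    }

    void *getTensorUntyped(size_t Index) { return InputBuffers[Index]; }
  };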

For release mode (AOT-ed models), this decouples compiler evolution from
model evolution, which we want in scenarios where the toolchain is
frequently rebuilt and redeployed: we can deploy the new features first
and continue working with the older model until a new model is made
available, which is then picked up the next time the compiler is built.
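
As a usage sketch (mirroring the new ReleaseModeRunner.ExtraFeatures
unit test below, with its MockAOTModel): the compiled model only knows
features "a" and "b", while the compiler additionally exposes "c".

  std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
                                 TensorSpec::createSpec<int64_t>("b", {1}),
                                 TensorSpec::createSpec<int64_t>("c", {1})};
  ReleaseModeModelRunner<MockAOTModel> Runner(Ctx, Inputs, "", "prefix_");
  *Runner.getTensor<int64_t>(0) = 1;
  *Runner.getTensor<int64_t>(1) = 2;
  // "c" gets a valid, runner-owned buffer, but the model never sees it:
  *Runner.getTensor<int64_t>(2) = -3;
  // evaluate() still computes a + b (= 3) with the old model.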

Differential Revision: https://reviews.llvm.org/D124565

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
    llvm/include/llvm/Analysis/MLModelRunner.h
    llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
    llvm/include/llvm/Analysis/NoInferenceModelRunner.h
    llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
    llvm/include/llvm/Analysis/TensorSpec.h
    llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
    llvm/lib/Analysis/MLInlineAdvisor.cpp
    llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
    llvm/lib/Analysis/NoInferenceModelRunner.cpp
    llvm/lib/Analysis/TFUtils.cpp
    llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
    llvm/unittests/Analysis/CMakeLists.txt
    llvm/unittests/Analysis/MLModelRunnerTest.cpp
    llvm/unittests/Analysis/TFUtilsTest.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
index 1afa8a825f15a..fb8236c28b251 100644
--- a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
+++ b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
@@ -10,6 +10,8 @@
 #ifndef LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
 #define LLVM_ANALYSIS_INLINEMODELFEATUREMAPS_H
 
+#include "llvm/Analysis/TensorSpec.h"
+
 #include <array>
 #include <string>
 #include <vector>
@@ -127,7 +129,7 @@ inlineCostFeatureToMlFeature(InlineCostFeatureIndex Feature) {
 constexpr size_t NumberOfFeatures =
     static_cast<size_t>(FeatureIndex::NumberOfFeatures);
 
-extern const std::array<std::string, NumberOfFeatures> FeatureNameMap;
+extern const std::array<TensorSpec, NumberOfFeatures> FeatureMap;
 
 extern const char *const DecisionName;
 extern const char *const DefaultDecisionName;

diff --git a/llvm/include/llvm/Analysis/MLModelRunner.h b/llvm/include/llvm/Analysis/MLModelRunner.h
index a923af5f06d2d..872c0e37f00e6 100644
--- a/llvm/include/llvm/Analysis/MLModelRunner.h
+++ b/llvm/include/llvm/Analysis/MLModelRunner.h
@@ -10,6 +10,7 @@
 #ifndef LLVM_ANALYSIS_MLMODELRUNNER_H
 #define LLVM_ANALYSIS_MLMODELRUNNER_H
 
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/IR/PassManager.h"
 
 namespace llvm {
@@ -41,7 +42,7 @@ class MLModelRunner {
         getTensorUntyped(static_cast<size_t>(FeatureID)));
   }
 
-  virtual void *getTensorUntyped(size_t Index) = 0;
+  void *getTensorUntyped(size_t Index) { return InputBuffers[Index]; }
   const void *getTensorUntyped(size_t Index) const {
     return (const_cast<MLModelRunner *>(this))->getTensorUntyped(Index);
   }
@@ -50,13 +51,27 @@ class MLModelRunner {
   Kind getKind() const { return Type; }
 
 protected:
-  MLModelRunner(LLVMContext &Ctx, Kind Type) : Ctx(Ctx), Type(Type) {
+  MLModelRunner(LLVMContext &Ctx, Kind Type, size_t NrInputs)
+      : Ctx(Ctx), Type(Type), InputBuffers(NrInputs) {
     assert(Type != Kind::Unknown);
   }
   virtual void *evaluateUntyped() = 0;
 
+  void setUpBufferForTensor(size_t Index, const TensorSpec &Spec,
+                            void *Buffer) {
+    if (!Buffer) {
+      OwnedBuffers.emplace_back(Spec.getTotalTensorBufferSize());
+      Buffer = OwnedBuffers.back().data();
+    }
+    InputBuffers[Index] = Buffer;
+  }
+
   LLVMContext &Ctx;
   const Kind Type;
+
+private:
+  std::vector<void *> InputBuffers;
+  std::vector<std::vector<char>> OwnedBuffers;
 };
 } // namespace llvm
 

diff --git a/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
index 071ccf96fe5b0..72bd185b6c32b 100644
--- a/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
+++ b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
@@ -10,6 +10,7 @@
 #ifndef LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
 #define LLVM_ANALYSIS_MODELUNDERTRAININGRUNNER_H
 
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Config/llvm-config.h"
 
 #ifdef LLVM_HAVE_TF_API
@@ -48,6 +49,11 @@ class ModelUnderTrainingRunner final : public MLModelRunner {
                        StringRef DecisionName,
                        const std::vector<TensorSpec> &InputSpecs,
                        StringRef OutputSpecsPathOverride = "");
+  static std::unique_ptr<ModelUnderTrainingRunner>
+  createAndEnsureValid(LLVMContext &Ctx, const std::string &ModelPath,
+                       StringRef DecisionName,
+                       const std::vector<TensorSpec> &InputSpecs,
+                       const std::vector<LoggedFeatureSpec> &OutputSpecs);
 
 private:
   ModelUnderTrainingRunner(LLVMContext &Ctx, const std::string &ModelPath,
@@ -58,7 +64,6 @@ class ModelUnderTrainingRunner final : public MLModelRunner {
   const std::vector<LoggedFeatureSpec> OutputSpecs;
   Optional<TFModelEvaluator::EvaluationResult> LastEvaluationResult;
   void *evaluateUntyped() override;
-  void *getTensorUntyped(size_t Index) override;
   bool isValid() const { return !!Evaluator; }
 };
 

diff --git a/llvm/include/llvm/Analysis/NoInferenceModelRunner.h b/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
index 5bcedf98865ca..980b40500d7cc 100644
--- a/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
+++ b/llvm/include/llvm/Analysis/NoInferenceModelRunner.h
@@ -10,13 +10,9 @@
 #ifndef LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
 #define LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H
 
-#include "llvm/Config/llvm-config.h"
-
-/// While not strictly necessary to conditionally compile this, it really
-/// has no usecase outside the 'development' mode.
-#ifdef LLVM_HAVE_TF_API
 #include "llvm/Analysis/MLModelRunner.h"
-#include "llvm/Analysis/Utils/TFUtils.h"
+#include "llvm/Analysis/TensorSpec.h"
+#include "llvm/Config/llvm-config.h"
 namespace llvm {
 /// A pseudo model runner. We use it to store feature values when collecting
 /// logs for the default policy, in 'development' mode, but never ask it to
@@ -34,10 +30,6 @@ class NoInferenceModelRunner : public MLModelRunner {
   void *evaluateUntyped() override {
     llvm_unreachable("We shouldn't call run on this model runner.");
   }
-  void *getTensorUntyped(size_t Index) override;
-
-  std::vector<std::unique_ptr<char[]>> ValuesBuffer;
 };
 } // namespace llvm
-#endif // defined(LLVM_HAVE_TF_API)
 #endif // LLVM_ANALYSIS_NOINFERENCEMODELRUNNER_H

diff --git a/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h b/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
index 6594b26ee6d96..bf1aaca2adbbe 100644
--- a/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
+++ b/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
@@ -15,6 +15,7 @@
 #define LLVM_ANALYSIS_RELEASEMODEMODELRUNNER_H
 
 #include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Support/ErrorHandling.h"
 
 #include <memory>
@@ -30,21 +31,20 @@ class ReleaseModeModelRunner final : public MLModelRunner {
   /// FeatureNames' type should be an indexed collection of std::string, like
   /// std::array or std::vector, that has a size() method.
   template <class FType>
-  ReleaseModeModelRunner(LLVMContext &Ctx, const FType &FeatureNames,
+  ReleaseModeModelRunner(LLVMContext &Ctx, const FType &InputSpec,
                          StringRef DecisionName, StringRef FeedPrefix = "feed_",
                          StringRef FetchPrefix = "fetch_")
-      : MLModelRunner(Ctx, MLModelRunner::Kind::Release),
+      : MLModelRunner(Ctx, MLModelRunner::Kind::Release, InputSpec.size()),
         CompiledModel(std::make_unique<TGen>()) {
     assert(CompiledModel && "The CompiledModel should be valid");
 
-    const size_t FeatureCount = FeatureNames.size();
-    FeatureIndices.resize(FeatureCount);
-
-    for (size_t I = 0; I < FeatureCount; ++I) {
+    for (size_t I = 0; I < InputSpec.size(); ++I) {
       const int Index =
-          CompiledModel->LookupArgIndex(FeedPrefix.str() + FeatureNames[I]);
-      assert(Index >= 0 && "Cannot find Feature in inlining model");
-      FeatureIndices[I] = Index;
+          CompiledModel->LookupArgIndex(FeedPrefix.str() + InputSpec[I].name());
+      void *Buffer = nullptr;
+      if (Index >= 0)
+        Buffer = CompiledModel->arg_data(Index);
+      setUpBufferForTensor(I, InputSpec[I], Buffer);
     }
 
     ResultIndex = CompiledModel->LookupResultIndex(FetchPrefix.str() +
@@ -64,12 +64,6 @@ class ReleaseModeModelRunner final : public MLModelRunner {
     return CompiledModel->result_data(ResultIndex);
   }
 
-  void *getTensorUntyped(size_t Index) override {
-    return reinterpret_cast<char *>(
-        CompiledModel->arg_data(FeatureIndices[Index]));
-  }
-
-  std::vector<int32_t> FeatureIndices;
   int32_t ResultIndex = -1;
   std::unique_ptr<TGen> CompiledModel;
 };

diff --git a/llvm/include/llvm/Analysis/TensorSpec.h b/llvm/include/llvm/Analysis/TensorSpec.h
index e4afcf90a0de0..382ab3f104452 100644
--- a/llvm/include/llvm/Analysis/TensorSpec.h
+++ b/llvm/include/llvm/Analysis/TensorSpec.h
@@ -74,6 +74,8 @@ class TensorSpec final {
   size_t getElementCount() const { return ElementCount; }
   /// Get the size, in bytes, of one element.
   size_t getElementByteSize() const { return ElementSize; }
+  /// Get the total size of a memory buffer needed to store the whole tensor.
+  size_t getTotalTensorBufferSize() const { return ElementCount * ElementSize; }
 
   template <typename T> bool isElementType() const {
     return getDataType<T>() == Type;

diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
index 71c74a139a618..79ea160afc224 100644
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -272,8 +272,8 @@ static const std::vector<TensorSpec> TrainingOnlyFeatures{
 static const std::vector<TensorSpec> getInputFeatures() {
   std::vector<TensorSpec> InputSpecs;
   for (size_t I = 0; I < NumberOfFeatures; ++I)
-    InputSpecs.push_back(
-        TensorSpec::createSpec<int64_t>(TFFeedPrefix + FeatureNameMap[I], {1}));
+    InputSpecs.push_back(TensorSpec::createSpec<int64_t>(
+        TFFeedPrefix + FeatureMap[I].name(), FeatureMap[I].shape()));
   append_range(InputSpecs, TrainingOnlyFeatures);
   return InputSpecs;
 }
@@ -289,8 +289,7 @@ TrainingLogger::TrainingLogger(StringRef LogFileName,
   std::vector<LoggedFeatureSpec> FT;
 
   for (size_t I = 0; I < NumberOfFeatures; ++I)
-    FT.push_back(
-        {TensorSpec::createSpec<int64_t>(FeatureNameMap.at(I), {1}), None});
+    FT.push_back({FeatureMap.at(I), None});
   if (MUTR && MUTR->outputLoggedFeatureSpecs().size() > 1)
     append_range(FT, drop_begin(MUTR->outputLoggedFeatureSpecs()));
 

diff --git a/llvm/lib/Analysis/MLInlineAdvisor.cpp b/llvm/lib/Analysis/MLInlineAdvisor.cpp
index 2459db705da4b..cc454dd3687df 100644
--- a/llvm/lib/Analysis/MLInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/MLInlineAdvisor.cpp
@@ -37,7 +37,7 @@ std::unique_ptr<InlineAdvisor>
 llvm::getReleaseModeAdvisor(Module &M, ModuleAnalysisManager &MAM) {
   auto AOTRunner =
       std::make_unique<ReleaseModeModelRunner<llvm::InlinerSizeModel>>(
-          M.getContext(), FeatureNameMap, DecisionName);
+          M.getContext(), FeatureMap, DecisionName);
   return std::make_unique<MLInlineAdvisor>(M, MAM, std::move(AOTRunner));
 }
 #endif
@@ -51,14 +51,14 @@ static cl::opt<float> SizeIncreaseThreshold(
     cl::init(2.0));
 
 // clang-format off
-const std::array<std::string, NumberOfFeatures> llvm::FeatureNameMap{
+const std::array<TensorSpec, NumberOfFeatures> llvm::FeatureMap{
+#define POPULATE_NAMES(_, NAME) TensorSpec::createSpec<int64_t>(NAME, {1} ),
 // InlineCost features - these must come first
-#define POPULATE_NAMES(INDEX_NAME, NAME) NAME,
   INLINE_COST_FEATURE_ITERATOR(POPULATE_NAMES)
 #undef POPULATE_NAMES
 
 // Non-cost features
-#define POPULATE_NAMES(INDEX_NAME, NAME, COMMENT) NAME,
+#define POPULATE_NAMES(_, NAME, __) TensorSpec::createSpec<int64_t>(NAME, {1} ),
   INLINE_FEATURE_ITERATOR(POPULATE_NAMES)
 #undef POPULATE_NAMES
 };
@@ -364,7 +364,7 @@ void MLInlineAdvice::reportContextForRemark(
   using namespace ore;
   OR << NV("Callee", Callee->getName());
   for (size_t I = 0; I < NumberOfFeatures; ++I)
-    OR << NV(FeatureNameMap[I],
+    OR << NV(FeatureMap[I].name(),
              *getAdvisor()->getModelRunner().getTensor<int64_t>(I));
   OR << NV("ShouldInline", isInliningRecommended());
 }

diff --git a/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
index fab51d6a7aafc..d3cbfeda3ca1d 100644
--- a/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
+++ b/llvm/lib/Analysis/ModelUnderTrainingRunner.cpp
@@ -11,6 +11,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/Config/config.h"
 #if defined(LLVM_HAVE_TF_API)
 
@@ -22,7 +23,7 @@ ModelUnderTrainingRunner::ModelUnderTrainingRunner(
     LLVMContext &Ctx, const std::string &ModelPath,
     const std::vector<TensorSpec> &InputSpecs,
     const std::vector<LoggedFeatureSpec> &OutputSpecs)
-    : MLModelRunner(Ctx, MLModelRunner::Kind::Development),
+    : MLModelRunner(Ctx, MLModelRunner::Kind::Development, InputSpecs.size()),
       OutputSpecs(OutputSpecs) {
   Evaluator = std::make_unique<TFModelEvaluator>(
       ModelPath, InputSpecs, [&](size_t I) { return OutputSpecs[I].Spec; },
@@ -32,6 +33,10 @@ ModelUnderTrainingRunner::ModelUnderTrainingRunner(
     Evaluator.reset();
     return;
   }
+
+  for (size_t I = 0, E = InputSpecs.size(); I < E; ++I) {
+    setUpBufferForTensor(I, InputSpecs[I], Evaluator->getUntypedInput(I));
+  }
 }
 
 void *ModelUnderTrainingRunner::evaluateUntyped() {
@@ -43,24 +48,31 @@ void *ModelUnderTrainingRunner::evaluateUntyped() {
   return LastEvaluationResult->getUntypedTensorValue(0);
 }
 
-void *ModelUnderTrainingRunner::getTensorUntyped(size_t Index) {
-  return Evaluator->getUntypedInput(Index);
-}
-
 std::unique_ptr<ModelUnderTrainingRunner>
 ModelUnderTrainingRunner::createAndEnsureValid(
     LLVMContext &Ctx, const std::string &ModelPath, StringRef DecisionName,
     const std::vector<TensorSpec> &InputSpecs,
     StringRef OutputSpecsPathOverride) {
-  std::unique_ptr<ModelUnderTrainingRunner> MUTR;
   if (auto MaybeOutputSpecs = loadOutputSpecs(Ctx, DecisionName, ModelPath,
                                               OutputSpecsPathOverride))
-    MUTR.reset(new ModelUnderTrainingRunner(Ctx, ModelPath, InputSpecs,
-                                            *MaybeOutputSpecs));
+    return createAndEnsureValid(Ctx, ModelPath, DecisionName, InputSpecs,
+                                *MaybeOutputSpecs);
+  Ctx.emitError("Could not load the policy model from the provided path");
+  return nullptr;
+}
+
+std::unique_ptr<ModelUnderTrainingRunner>
+ModelUnderTrainingRunner::createAndEnsureValid(
+    LLVMContext &Ctx, const std::string &ModelPath, StringRef DecisionName,
+    const std::vector<TensorSpec> &InputSpecs,
+    const std::vector<LoggedFeatureSpec> &OutputSpecs) {
+  std::unique_ptr<ModelUnderTrainingRunner> MUTR;
+  MUTR.reset(
+      new ModelUnderTrainingRunner(Ctx, ModelPath, InputSpecs, OutputSpecs));
   if (MUTR && MUTR->isValid())
     return MUTR;
 
-  Ctx.emitError("Could not load the policy model from the provided path");
+  Ctx.emitError("Could not load or create model evaluator.");
   return nullptr;
 }
 

diff --git a/llvm/lib/Analysis/NoInferenceModelRunner.cpp b/llvm/lib/Analysis/NoInferenceModelRunner.cpp
index 7178120ebe4fc..1914b22f5d71d 100644
--- a/llvm/lib/Analysis/NoInferenceModelRunner.cpp
+++ b/llvm/lib/Analysis/NoInferenceModelRunner.cpp
@@ -10,24 +10,14 @@
 // logs for the default policy, in 'development' mode, but never ask it to
 // 'run'.
 //===----------------------------------------------------------------------===//
-#include "llvm/Config/config.h"
-#if defined(LLVM_HAVE_TF_API)
-
 #include "llvm/Analysis/NoInferenceModelRunner.h"
-#include "llvm/Analysis/Utils/TFUtils.h"
 
 using namespace llvm;
 
 NoInferenceModelRunner::NoInferenceModelRunner(
     LLVMContext &Ctx, const std::vector<TensorSpec> &Inputs)
-    : MLModelRunner(Ctx, MLModelRunner::Kind::NoOp) {
-  ValuesBuffer.reserve(Inputs.size());
+    : MLModelRunner(Ctx, MLModelRunner::Kind::NoOp, Inputs.size()) {
+  size_t Index = 0;
   for (const auto &TS : Inputs)
-    ValuesBuffer.push_back(std::make_unique<char[]>(TS.getElementCount() *
-                                                    TS.getElementByteSize()));
-}
-
-void *NoInferenceModelRunner::getTensorUntyped(size_t Index) {
-  return ValuesBuffer[Index].get();
+    setUpBufferForTensor(Index++, TS, nullptr);
 }
-#endif // defined(LLVM_HAVE_TF_API)

diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp
index 3d4ef160824a1..203858c1cf06c 100644
--- a/llvm/lib/Analysis/TFUtils.cpp
+++ b/llvm/lib/Analysis/TFUtils.cpp
@@ -300,16 +300,29 @@ TFModelEvaluatorImpl::TFModelEvaluatorImpl(
     errs() << TF_Message(Status.get());
     invalidate();
   }
+  size_t NrSupported = 0;
   for (size_t I = 0; I < InputSpecs.size(); ++I) {
     auto &InputSpec = InputSpecs[I];
     InputFeed[I] = {
         TF_GraphOperationByName(Graph.get(), (InputSpec.name()).c_str()),
         InputSpec.port()};
+    if (!InputFeed[I].oper) {
+      continue;
+    }
+    if (NrSupported++ != I) {
+      errs()
+          << "Unsupported features must be placed at the end of the InputSpecs";
+      invalidate();
+      return;
+    }
     if (!checkReportAndInvalidate(InputFeed[I], InputSpec))
       return;
     initInput(I, static_cast<TF_DataType>(getTFTypeIndex(InputSpec.type())),
               InputSpec.shape());
   }
+  InputFeed.resize(NrSupported);
+  Input.resize(NrSupported);
+
   for (size_t I = 0; I < OutputSpecsSize; ++I) {
     auto OutputSpec = GetOutputSpecs(I);
     OutputFeed[I] = {
@@ -387,7 +400,9 @@ void TFModelEvaluatorImpl::initInput(size_t Index, TF_DataType Type,
 }
 
 void *TFModelEvaluator::getUntypedInput(size_t Index) {
-  return TF_TensorData(Impl->getInput()[Index]);
+  if (Index < Impl->getInput().size())
+    return TF_TensorData(Impl->getInput()[Index]);
+  return nullptr;
 }
 
 TFModelEvaluator::EvaluationResult::EvaluationResult(

diff --git a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
index e01838e5fe115..7daf9025d3036 100644
--- a/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
+++ b/llvm/lib/CodeGen/MLRegallocEvictAdvisor.cpp
@@ -15,6 +15,7 @@
 #include "RegAllocGreedy.h"
 #include "llvm/Analysis/AliasAnalysis.h"
 #include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Analysis/TensorSpec.h"
 #if defined(LLVM_HAVE_TF_AOT_REGALLOCEVICTMODEL) || defined(LLVM_HAVE_TF_API) 
 #include "llvm/Analysis/ModelUnderTrainingRunner.h"
 #include "llvm/Analysis/NoInferenceModelRunner.h"
@@ -320,14 +321,16 @@ class MLEvictAdvisor : public RegAllocEvictionAdvisor {
   mutable DenseMap<RegID, LIFeatureComponents> CachedFeatures;
 };
 
+#define _DECL_FEATURES(type, name, shape, _)                                   \
+  TensorSpec::createSpec<type>(#name, shape),
+
+static const std::vector<TensorSpec> InputFeatures{
+    {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)},
+};
+#undef _DECL_FEATURES
 // ===================================
 // Release (AOT) - specifics
 // ===================================
-const std::array<std::string, FeatureIDs::FeatureCount> FeatureNames{
-#define _GETNAME(_, NAME, __, ___) #NAME,
-    RA_EVICT_FEATURES_LIST(_GETNAME)
-#undef _GETNAME
-};
 class ReleaseModeEvictionAdvisorAnalysis final
     : public RegAllocEvictionAdvisorAnalysis {
 public:
@@ -349,7 +352,7 @@ class ReleaseModeEvictionAdvisorAnalysis final
   getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
     if (!Runner)
       Runner = std::make_unique<ReleaseModeModelRunner<CompiledModelType>>(
-          MF.getFunction().getContext(), FeatureNames, DecisionName);
+          MF.getFunction().getContext(), InputFeatures, DecisionName);
     return std::make_unique<MLEvictAdvisor>(
         MF, RA, Runner.get(), getAnalysis<MachineBlockFrequencyInfo>(),
         getAnalysis<MachineLoopInfo>());
@@ -363,13 +366,6 @@ class ReleaseModeEvictionAdvisorAnalysis final
 //
 // Features we log
 #ifdef LLVM_HAVE_TF_API
-#define _DECL_FEATURES(type, name, shape, _)                                   \
-  TensorSpec::createSpec<type>(#name, shape),
-
-static const std::vector<TensorSpec> InputFeatures{
-    {RA_EVICT_FEATURES_LIST(_DECL_FEATURES)},
-};
-#undef _DECL_FEATURES
 static const TensorSpec Output =
     TensorSpec::createSpec<int64_t>(DecisionName, {1});
 static const TensorSpec Reward = TensorSpec::createSpec<float>("reward", {1});

diff --git a/llvm/unittests/Analysis/CMakeLists.txt b/llvm/unittests/Analysis/CMakeLists.txt
index 0656b82af4d52..ed2cb81055b38 100644
--- a/llvm/unittests/Analysis/CMakeLists.txt
+++ b/llvm/unittests/Analysis/CMakeLists.txt
@@ -6,7 +6,7 @@ set(LLVM_LINK_COMPONENTS
   TransformUtils
   )
 
-set(MLGO_TESTS TFUtilsTest.cpp MLModelRunnerTest.cpp)
+set(MLGO_TESTS TFUtilsTest.cpp)
 if (DEFINED LLVM_HAVE_TF_API)
   LIST(APPEND EXTRA_TESTS ${MLGO_TESTS})
 else()
@@ -39,6 +39,7 @@ add_llvm_unittest_with_input_files(AnalysisTests
   LoopNestTest.cpp
   MemoryBuiltinsTest.cpp
   MemorySSATest.cpp
+  MLModelRunnerTest.cpp
   PhiValuesTest.cpp
   ProfileSummaryInfoTest.cpp
   ScalarEvolutionTest.cpp

diff --git a/llvm/unittests/Analysis/MLModelRunnerTest.cpp b/llvm/unittests/Analysis/MLModelRunnerTest.cpp
index 9794365ca51c6..05dadccbe1e72 100644
--- a/llvm/unittests/Analysis/MLModelRunnerTest.cpp
+++ b/llvm/unittests/Analysis/MLModelRunnerTest.cpp
@@ -8,10 +8,49 @@
 
 #include "llvm/Analysis/MLModelRunner.h"
 #include "llvm/Analysis/NoInferenceModelRunner.h"
+#include "llvm/Analysis/ReleaseModeModelRunner.h"
 #include "gtest/gtest.h"
 
 using namespace llvm;
 
+namespace llvm {
+// This is a mock of the kind of AOT-generated model evaluator. It has 2 tensors
+// of shape {1}, and 'evaluation' adds them.
+// The interface is the one expected by ReleaseModelRunner.
+class MockAOTModel final {
+  int64_t A = 0;
+  int64_t B = 0;
+  int64_t R = 0;
+
+public:
+  MockAOTModel() = default;
+  int LookupArgIndex(const std::string &Name) {
+    if (Name == "prefix_a")
+      return 0;
+    if (Name == "prefix_b")
+      return 1;
+    return -1;
+  }
+  int LookupResultIndex(const std::string &) { return 0; }
+  void Run() { R = A + B; }
+  void *result_data(int RIndex) {
+    if (RIndex == 0)
+      return &R;
+    return nullptr;
+  }
+  void *arg_data(int Index) {
+    switch (Index) {
+    case 0:
+      return &A;
+    case 1:
+      return &B;
+    default:
+      return nullptr;
+    }
+  }
+};
+} // namespace llvm
+
 TEST(NoInferenceModelRunner, AccessTensors) {
   const std::vector<TensorSpec> Inputs{
       TensorSpec::createSpec<int64_t>("F1", {1}),
@@ -30,4 +69,51 @@ TEST(NoInferenceModelRunner, AccessTensors) {
   ASSERT_EQ(NIMR.getTensor<int64_t>(0)[0], 1);
   ASSERT_EQ(NIMR.getTensor<int64_t>(1)[8], 9);
   ASSERT_EQ(NIMR.getTensor<float>(2)[1], 0.2f);
+}
+
+TEST(ReleaseModeRunner, NormalUse) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
+                                 TensorSpec::createSpec<int64_t>("b", {1})};
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
+      Ctx, Inputs, "", "prefix_");
+  *Evaluator->getTensor<int64_t>(0) = 1;
+  *Evaluator->getTensor<int64_t>(1) = 2;
+  EXPECT_EQ(Evaluator->evaluate<int64_t>(), 3);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
+}
+
+TEST(ReleaseModeRunner, ExtraFeatures) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{TensorSpec::createSpec<int64_t>("a", {1}),
+                                 TensorSpec::createSpec<int64_t>("b", {1}),
+                                 TensorSpec::createSpec<int64_t>("c", {1})};
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
+      Ctx, Inputs, "", "prefix_");
+  *Evaluator->getTensor<int64_t>(0) = 1;
+  *Evaluator->getTensor<int64_t>(1) = 2;
+  *Evaluator->getTensor<int64_t>(2) = -3;
+  EXPECT_EQ(Evaluator->evaluate<int64_t>(), 3);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(2), -3);
+}
+
+TEST(ReleaseModeRunner, ExtraFeaturesOutOfOrder) {
+  LLVMContext Ctx;
+  std::vector<TensorSpec> Inputs{
+      TensorSpec::createSpec<int64_t>("a", {1}),
+      TensorSpec::createSpec<int64_t>("c", {1}),
+      TensorSpec::createSpec<int64_t>("b", {1}),
+  };
+  auto Evaluator = std::make_unique<ReleaseModeModelRunner<MockAOTModel>>(
+      Ctx, Inputs, "", "prefix_");
+  *Evaluator->getTensor<int64_t>(0) = 1;         // a
+  *Evaluator->getTensor<int64_t>(1) = 2;         // c
+  *Evaluator->getTensor<int64_t>(2) = -3;        // b
+  EXPECT_EQ(Evaluator->evaluate<int64_t>(), -2); // a + b
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(0), 1);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(1), 2);
+  EXPECT_EQ(*Evaluator->getTensor<int64_t>(2), -3);
 }
\ No newline at end of file

diff --git a/llvm/unittests/Analysis/TFUtilsTest.cpp b/llvm/unittests/Analysis/TFUtilsTest.cpp
index a1495e9b6bbfb..6ec129cf413d7 100644
--- a/llvm/unittests/Analysis/TFUtilsTest.cpp
+++ b/llvm/unittests/Analysis/TFUtilsTest.cpp
@@ -10,6 +10,8 @@
 #include "google/protobuf/struct.pb.h"
 #include "tensorflow/core/example/example.pb.h"
 #include "tensorflow/core/example/feature.pb.h"
+#include "llvm/Analysis/ModelUnderTrainingRunner.h"
+#include "llvm/Analysis/TensorSpec.h"
 #include "llvm/AsmParser/Parser.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Instructions.h"
@@ -102,6 +104,36 @@ TEST(TFUtilsTest, EvalError) {
   EXPECT_FALSE(Evaluator.isValid());
 }
 
+TEST(TFUtilsTest, UnsupportedFeature) {
+  const static int64_t KnownSize = 214;
+  std::vector<TensorSpec> InputSpecs{
+      TensorSpec::createSpec<int32_t>("serving_default_input_1",
+                                      {1, KnownSize}),
+      TensorSpec::createSpec<float>("this_feature_does_not_exist", {2, 5})};
+
+  LLVMContext Ctx;
+  auto Evaluator = ModelUnderTrainingRunner::createAndEnsureValid(
+      Ctx, getModelPath(), "StatefulPartitionedCall", InputSpecs,
+      {LoggedFeatureSpec{
+          TensorSpec::createSpec<float>("StatefulPartitionedCall", {1}),
+          None}});
+  int32_t *V = Evaluator->getTensor<int32_t>(0);
+  // Fill it up with 1s, we know the output.
+  for (auto I = 0; I < KnownSize; ++I)
+    V[I] = 1;
+
+  float *F = Evaluator->getTensor<float>(1);
+  for (auto I = 0; I < 2 * 5; ++I)
+    F[I] = 3.14 + I;
+  float Ret = Evaluator->evaluate<float>();
+  EXPECT_EQ(static_cast<int64_t>(Ret), 80);
+  // The input vector should be unchanged
+  for (auto I = 0; I < KnownSize; ++I)
+    EXPECT_EQ(V[I], 1);
+  for (auto I = 0; I < 2 * 5; ++I)
+    EXPECT_FLOAT_EQ(F[I], 3.14 + I);
+}
+
 #define PROTO_CHECKER(FNAME, TYPE, INDEX, EXP)                                 \
   do {                                                                         \
     const auto &V = Expected.feature_lists()                                   \


        

