[llvm] 5b26f4f - Reland "[MLGO] ML Regalloc Priority Advisor"

Eric Wang via llvm-commits llvm-commits at lists.llvm.org
Fri Sep 30 14:27:53 PDT 2022


Author: Eric Wang
Date: 2022-09-30T16:27:26-05:00
New Revision: 5b26f4f042225f236205dcd73b1ed7d1a98c5b1a

URL: https://github.com/llvm/llvm-project/commit/5b26f4f042225f236205dcd73b1ed7d1a98c5b1a
DIFF: https://github.com/llvm/llvm-project/commit/5b26f4f042225f236205dcd73b1ed7d1a98c5b1a.diff

LOG: Reland "[MLGO] ML Regalloc Priority Advisor"

This relands commit 8f4f26ba5bd04f7b335836021e5e63b4236c0305, which was reverted in 91c96a806cae58539e40c9e443a08bde91ccc91e because of buildbot failures: the previous model test was not compatible with tflite (see e.g. https://lab.llvm.org/buildbot/#/builders/6/builds/14041).

Differential Revision: https://reviews.llvm.org/D133616
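
For reference, the development-mode advisor added here is driven by llc flags; a minimal usage sketch, following the added lit test (the input IR file and the log/model paths below are placeholders):

    llc -mtriple=x86_64-linux-unknown -regalloc=greedy \
        -regalloc-enable-priority-advisor=development \
        -regalloc-priority-training-log=prio.log -tfutils-text-log < input.ll

    # Optionally, point at a model under training, e.g. the mock model produced
    # by the new gen-regalloc-priority-test-model.py script, via
    # -regalloc-priority-model=<saved model dir>.

This requires a build with LLVM_HAVE_TF_API (the have_tf_api lit feature).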

Added: 
    llvm/lib/Analysis/models/gen-regalloc-priority-test-model.py
    llvm/lib/CodeGen/MLRegallocPriorityAdvisor.cpp
    llvm/test/CodeGen/MLRegalloc/Inputs/reference-prio-log-noml.txt
    llvm/test/CodeGen/MLRegalloc/dev-mode-prio-logging.ll

Modified: 
    llvm/lib/CodeGen/CMakeLists.txt
    llvm/lib/CodeGen/RegAllocPriorityAdvisor.cpp

Removed: 
    


################################################################################
diff --git a/llvm/lib/Analysis/models/gen-regalloc-priority-test-model.py b/llvm/lib/Analysis/models/gen-regalloc-priority-test-model.py
new file mode 100644
index 0000000000000..81de2c70565a8
--- /dev/null
+++ b/llvm/lib/Analysis/models/gen-regalloc-priority-test-model.py
@@ -0,0 +1,95 @@
+"""Generate a mock model for LLVM tests for Register Allocation.
+The generated model is not a neural net - it is just a tf.function with the
+correct input and output parameters. 
+"""
+## By construction, the mock model will always output the first liverange that can be evicted.
+
+import os
+import sys
+import tensorflow as tf
+POLICY_DECISION_LABEL = 'priority'
+POLICY_OUTPUT_SPEC = """
+[
+    {
+        "logging_name": "priority", 
+        "tensor_spec": {
+            "name": "StatefulPartitionedCall", 
+            "port": 0, 
+            "type": "float", 
+            "shape": [
+                1
+            ]
+        }
+    }
+]
+"""
+PER_LIVEINTERVAL_INT64_FEATURE_LIST = [
+    'li_size', 'stage'
+]
+PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST = ['weight']
+PER_LIVEINTERVAL_FEATURE_LIST = PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST + \
+    PER_LIVEINTERVAL_INT64_FEATURE_LIST
+CONTEXT_FEATURE_LIST =  ('discount', 'reward', 'step_type')
+
+
+def get_input_signature():
+   """Returns (time_step_spec, action_spec) for LLVM register allocation."""
+   inputs = dict(
+       (key, tf.TensorSpec(dtype=tf.int64, shape=(), name=key))
+       for key in PER_LIVEINTERVAL_INT64_FEATURE_LIST)
+   inputs.update(
+       dict((key,
+             tf.TensorSpec(dtype=tf.float32, shape=(), name=key))
+            for key in PER_LIVEINTERVAL_FLOAT32_FEATURE_LIST))
+   inputs.update(
+       dict((key, tf.TensorSpec(dtype=tf.float32, shape=(), name=key))
+            for key in ['discount', 'reward']))
+   inputs.update(
+       dict((key, tf.TensorSpec(dtype=tf.int32, shape=(), name=key))
+            for key in ['step_type']))
+   return inputs
+
+
+def get_output_spec_path(path):
+   return os.path.join(path, 'output_spec.json')
+
+
+def build_mock_model(path):
+   """Build and save the mock model with the given signature."""
+   module = tf.Module()
+   # We have to define this (otherwise unused) variable so that the TF C API
+   # can load the saved model correctly.
+   module.var = tf.Variable(0, dtype=tf.float32)
+
+   def action(*inputs):
+     s1 = tf.reduce_sum([
+         tf.cast(inputs[0][key], tf.float32) for key in PER_LIVEINTERVAL_FEATURE_LIST
+     ],
+         axis=0)
+     s2 = tf.reduce_sum(
+         [tf.cast(inputs[0][key], tf.float32) for key in CONTEXT_FEATURE_LIST])
+     # The mock output is simply the sum of all input features plus the dummy
+     # variable.
+     s = s1 + s2
+     result = s + module.var
+     return {POLICY_DECISION_LABEL: result}
+   module.action = tf.function()(action)
+   action = {
+       'action': module.action.get_concrete_function(get_input_signature())
+   }
+
+   tf.saved_model.save(module, path, signatures=action)
+   output_spec_path = get_output_spec_path(path)
+   with open(output_spec_path, 'w') as f:
+     print(f'Writing output spec to {output_spec_path}.')
+     f.write(POLICY_OUTPUT_SPEC)
+
+
+def main(argv):
+   assert len(argv) == 2
+   model_path = argv[1]
+   build_mock_model(model_path)
+
+
+if __name__ == '__main__':
+   main(sys.argv)

diff --git a/llvm/lib/CodeGen/CMakeLists.txt b/llvm/lib/CodeGen/CMakeLists.txt
index afa37044c81ec..ce14e1d9d545d 100644
--- a/llvm/lib/CodeGen/CMakeLists.txt
+++ b/llvm/lib/CodeGen/CMakeLists.txt
@@ -143,6 +143,7 @@ add_llvm_component_library(LLVMCodeGen
   MIRSampleProfile.cpp
   MIRYamlMapping.cpp
   MLRegallocEvictAdvisor.cpp
+  MLRegallocPriorityAdvisor.cpp
   ModuloSchedule.cpp
   MultiHazardRecognizer.cpp
   PatchableFunction.cpp

diff --git a/llvm/lib/CodeGen/MLRegallocPriorityAdvisor.cpp b/llvm/lib/CodeGen/MLRegallocPriorityAdvisor.cpp
new file mode 100644
index 0000000000000..cda1e4e8f5126
--- /dev/null
+++ b/llvm/lib/CodeGen/MLRegallocPriorityAdvisor.cpp
@@ -0,0 +1,335 @@
+//===- MLRegallocPriorityAdvisor.cpp - ML priority advisor ----------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// Implementation of the ML priority advisor and reward injection pass
+//
+//===----------------------------------------------------------------------===//
+
+#include "AllocationOrder.h"
+#include "RegAllocGreedy.h"
+#include "RegAllocPriorityAdvisor.h"
+#include "llvm/Analysis/AliasAnalysis.h"
+#include "llvm/Analysis/MLModelRunner.h"
+#include "llvm/Analysis/ReleaseModeModelRunner.h"
+#include "llvm/Analysis/TensorSpec.h"
+#include "llvm/CodeGen/CalcSpillWeights.h"
+#include "llvm/CodeGen/LiveRegMatrix.h"
+#include "llvm/CodeGen/MachineBlockFrequencyInfo.h"
+#include "llvm/CodeGen/MachineFunction.h"
+#include "llvm/CodeGen/MachineLoopInfo.h"
+#include "llvm/CodeGen/MachineRegisterInfo.h"
+#include "llvm/CodeGen/Passes.h"
+#include "llvm/CodeGen/RegisterClassInfo.h"
+#include "llvm/CodeGen/SlotIndexes.h"
+#include "llvm/CodeGen/VirtRegMap.h"
+#include "llvm/InitializePasses.h"
+#include "llvm/Pass.h"
+#include "llvm/PassRegistry.h"
+#include "llvm/Support/CommandLine.h"
+
+#if defined(LLVM_HAVE_TF_API)
+#include "llvm/Analysis/ModelUnderTrainingRunner.h"
+#include "llvm/Analysis/NoInferenceModelRunner.h"
+#include "llvm/Analysis/Utils/TrainingLogger.h"
+#endif
+
+using namespace llvm;
+
+// Options that only make sense in development mode
+#ifdef LLVM_HAVE_TF_API
+#include "RegAllocScore.h"
+#include "llvm/Analysis/Utils/TFUtils.h"
+
+static cl::opt<std::string> TrainingLog(
+    "regalloc-priority-training-log", cl::Hidden,
+    cl::desc("Training log for the register allocator priority model"));
+
+static cl::opt<std::string> ModelUnderTraining(
+    "regalloc-priority-model", cl::Hidden,
+    cl::desc("The model being trained for register allocation priority"));
+
+#endif // #ifdef LLVM_HAVE_TF_API
+
+namespace llvm {
+
+static const std::vector<int64_t> PerLiveRangeShape{1};
+
+#define RA_PRIORITY_FEATURES_LIST(M)                                           \
+  M(int64_t, li_size, PerLiveRangeShape, "size")                               \
+  M(int64_t, stage, PerLiveRangeShape, "stage")                                \
+  M(float, weight, PerLiveRangeShape, "weight")
+
+#define DecisionName "priority"
+
+// Named features index.
+enum FeatureIDs {
+#define _FEATURE_IDX(_, name, __, ___) name,
+  RA_PRIORITY_FEATURES_LIST(_FEATURE_IDX)
+#undef _FEATURE_IDX
+      FeatureCount
+};
+
+class MLPriorityAdvisor : public RegAllocPriorityAdvisor {
+public:
+  MLPriorityAdvisor(const MachineFunction &MF, const RAGreedy &RA,
+                    SlotIndexes *const Indexes, MLModelRunner *Runner);
+
+protected:
+  const RegAllocPriorityAdvisor &getDefaultAdvisor() const {
+    return static_cast<const RegAllocPriorityAdvisor &>(DefaultAdvisor);
+  }
+
+  // The assumption is that if the Runner could not be constructed, we emitted
+  // an error, and we shouldn't be asking for it here.
+  const MLModelRunner &getRunner() const { return *Runner; }
+  float getPriorityImpl(const LiveInterval &LI) const;
+  unsigned getPriority(const LiveInterval &LI) const override;
+
+private:
+  const DefaultPriorityAdvisor DefaultAdvisor;
+  MLModelRunner *const Runner;
+};
+
+#define _DECL_FEATURES(type, name, shape, _)                                   \
+  TensorSpec::createSpec<type>(#name, shape),
+
+static const std::vector<TensorSpec> InputFeatures{
+    {RA_PRIORITY_FEATURES_LIST(_DECL_FEATURES)},
+};
+#undef _DECL_FEATURES
+
+// ===================================
+// Release (AOT) - specifics
+// ===================================
+class ReleaseModePriorityAdvisorAnalysis final
+    : public RegAllocPriorityAdvisorAnalysis {
+public:
+  ReleaseModePriorityAdvisorAnalysis()
+      : RegAllocPriorityAdvisorAnalysis(AdvisorMode::Release) {}
+  // support for isa<> and dyn_cast.
+  static bool classof(const RegAllocPriorityAdvisorAnalysis *R) {
+    return R->getAdvisorMode() == AdvisorMode::Release;
+  }
+
+private:
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+    AU.addRequired<SlotIndexes>();
+    RegAllocPriorityAdvisorAnalysis::getAnalysisUsage(AU);
+  }
+
+  std::unique_ptr<RegAllocPriorityAdvisor>
+  getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
+    if (!Runner)
+      Runner = std::make_unique<ReleaseModeModelRunner<NoopSavedModelImpl>>(
+          MF.getFunction().getContext(), InputFeatures, DecisionName);
+    return std::make_unique<MLPriorityAdvisor>(
+        MF, RA, &getAnalysis<SlotIndexes>(), Runner.get());
+  }
+  std::unique_ptr<ReleaseModeModelRunner<NoopSavedModelImpl>> Runner;
+};
+
+// ===================================
+// Development mode-specifics
+// ===================================
+//
+// Features we log
+#ifdef LLVM_HAVE_TF_API
+
+static const TensorSpec Output =
+    TensorSpec::createSpec<float>(DecisionName, {1});
+static const TensorSpec Reward = TensorSpec::createSpec<float>("reward", {1});
+
+#define _DECL_TRAIN_FEATURES(type, name, shape, _)                             \
+  TensorSpec::createSpec<type>(std::string("action_") + #name, shape),
+
+static const std::vector<TensorSpec> TrainingInputFeatures{
+    {RA_PRIORITY_FEATURES_LIST(_DECL_TRAIN_FEATURES)
+         TensorSpec::createSpec<float>("action_discount", {1}),
+     TensorSpec::createSpec<int32_t>("action_step_type", {1}),
+     TensorSpec::createSpec<float>("action_reward", {1})}};
+#undef _DECL_TRAIN_FEATURES
+
+class DevelopmentModePriorityAdvisor : public MLPriorityAdvisor {
+public:
+  DevelopmentModePriorityAdvisor(const MachineFunction &MF, const RAGreedy &RA,
+                                 SlotIndexes *const Indexes,
+                                 MLModelRunner *Runner, Logger *Log)
+      : MLPriorityAdvisor(MF, RA, Indexes, Runner), Log(Log) {}
+
+private:
+  unsigned getPriority(const LiveInterval &LI) const override;
+  Logger *const Log;
+};
+
+class DevelopmentModePriorityAdvisorAnalysis final
+    : public RegAllocPriorityAdvisorAnalysis {
+public:
+  DevelopmentModePriorityAdvisorAnalysis()
+      : RegAllocPriorityAdvisorAnalysis(AdvisorMode::Development) {}
+  // support for isa<> and dyn_cast.
+  static bool classof(const RegAllocPriorityAdvisorAnalysis *R) {
+    return R->getAdvisorMode() == AdvisorMode::Development;
+  }
+
+  /// Get the logger for the given function, or nullptr if we didn't collect
+  /// one. This is used by the RegAllocScoring pass to inject the score.
+  Logger *getLogger(const MachineFunction &MF) const {
+    auto I = LogMap.find(MF.getName());
+    if (I == LogMap.end())
+      return nullptr;
+    return I->second.get();
+  }
+
+  void logRewardIfNeeded(const MachineFunction &MF,
+                         llvm::function_ref<float()> GetReward) override {
+    if (auto *Log = this->getLogger(MF))
+      Log->logFloatFinalReward(GetReward());
+  }
+
+private:
+  void getAnalysisUsage(AnalysisUsage &AU) const override {
+    AU.setPreservesAll();
+    AU.addRequired<SlotIndexes>();
+    RegAllocPriorityAdvisorAnalysis::getAnalysisUsage(AU);
+  }
+
+  // Save all the logs (when requested).
+  bool doFinalization(Module &M) override {
+    if (TrainingLog.empty())
+      return false;
+    std::error_code EC;
+    auto OS = std::make_unique<raw_fd_ostream>(TrainingLog, EC);
+    if (EC) {
+      M.getContext().emitError(EC.message() + ":" + TrainingLog);
+      return false;
+    }
+    Logger::flushLogs(*OS, LogMap);
+    return false;
+  }
+
+  std::unique_ptr<RegAllocPriorityAdvisor>
+  getAdvisor(const MachineFunction &MF, const RAGreedy &RA) override {
+
+    LLVMContext &Ctx = MF.getFunction().getContext();
+    if (ModelUnderTraining.empty() && TrainingLog.empty()) {
+      Ctx.emitError("Regalloc development mode should be requested with at "
+                    "least logging enabled and/or a training model");
+      return nullptr;
+    }
+    if (!Runner) {
+      if (ModelUnderTraining.empty())
+        Runner = std::make_unique<NoInferenceModelRunner>(Ctx, InputFeatures);
+      else
+        Runner = ModelUnderTrainingRunner::createAndEnsureValid(
+            Ctx, ModelUnderTraining, DecisionName, TrainingInputFeatures);
+      if (!Runner) {
+        Ctx.emitError("Regalloc: could not set up the model runner");
+        return nullptr;
+      }
+    }
+
+    Logger *Log = nullptr;
+    if (!TrainingLog.empty()) {
+      std::vector<LoggedFeatureSpec> LFS;
+      for (const auto &FS : InputFeatures)
+        LFS.push_back({FS, None});
+      if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(Runner.get()))
+        if (MUTR->outputLoggedFeatureSpecs().size() > 1)
+          append_range(LFS, drop_begin(MUTR->outputLoggedFeatureSpecs()));
+      // We always log the output; in particular, if we're not evaluating, we
+      // don't have an output spec json file. That's why we handle the
+      // 'normal' output separately.
+      LFS.push_back({Output, None});
+      auto I = LogMap.insert(std::make_pair(
+          MF.getFunction().getName(),
+          std::make_unique<Logger>(LFS, Reward, /*IncludeReward*/ true)));
+      assert(I.second);
+      Log = I.first->second.get();
+    }
+
+    return std::make_unique<DevelopmentModePriorityAdvisor>(
+        MF, RA, &getAnalysis<SlotIndexes>(), Runner.get(), Log);
+  }
+
+  std::unique_ptr<MLModelRunner> Runner;
+  StringMap<std::unique_ptr<Logger>> LogMap;
+};
+#endif //#ifdef LLVM_HAVE_TF_API
+
+} // namespace llvm
+
+RegAllocPriorityAdvisorAnalysis *llvm::createReleaseModePriorityAdvisor() {
+  return new ReleaseModePriorityAdvisorAnalysis();
+}
+
+MLPriorityAdvisor::MLPriorityAdvisor(const MachineFunction &MF,
+                                     const RAGreedy &RA,
+                                     SlotIndexes *const Indexes,
+                                     MLModelRunner *Runner)
+    : RegAllocPriorityAdvisor(MF, RA, Indexes), DefaultAdvisor(MF, RA, Indexes),
+      Runner(std::move(Runner)) {
+  assert(this->Runner);
+}
+
+float MLPriorityAdvisor::getPriorityImpl(const LiveInterval &LI) const {
+  const unsigned Size = LI.getSize();
+  LiveRangeStage Stage = RA.getExtraInfo().getStage(LI);
+
+  *Runner->getTensor<int64_t>(0) = static_cast<int64_t>(Size);
+  *Runner->getTensor<int64_t>(1) = static_cast<int64_t>(Stage);
+  *Runner->getTensor<float>(2) = static_cast<float>(LI.weight());
+
+  return Runner->evaluate<float>();
+}
+
+unsigned MLPriorityAdvisor::getPriority(const LiveInterval &LI) const {
+  return static_cast<unsigned>(getPriorityImpl(LI));
+}
+
+#ifdef LLVM_HAVE_TF_API
+RegAllocPriorityAdvisorAnalysis *llvm::createDevelopmentModePriorityAdvisor() {
+  return new DevelopmentModePriorityAdvisorAnalysis();
+}
+
+unsigned
+DevelopmentModePriorityAdvisor::getPriority(const LiveInterval &LI) const {
+  double Prio = 0;
+
+  if (isa<ModelUnderTrainingRunner>(getRunner())) {
+    Prio = MLPriorityAdvisor::getPriorityImpl(LI);
+  } else {
+    Prio = getDefaultAdvisor().getPriority(LI);
+  }
+
+  if (TrainingLog.empty())
+    return Prio;
+
+  size_t CurrentFeature = 0;
+  for (; CurrentFeature < InputFeatures.size(); ++CurrentFeature) {
+    Log->logSpecifiedTensorValue(
+        CurrentFeature, reinterpret_cast<const char *>(
+                            getRunner().getTensorUntyped(CurrentFeature)));
+  }
+
+  if (auto *MUTR = dyn_cast<ModelUnderTrainingRunner>(&getRunner())) {
+    for (size_t I = 1; I < MUTR->outputLoggedFeatureSpecs().size();
+         ++I, ++CurrentFeature)
+      Log->logSpecifiedTensorValue(
+          CurrentFeature,
+          reinterpret_cast<const char *>(
+              MUTR->lastEvaluationResult()->getUntypedTensorValue(I)));
+  }
+
+  float Ret = static_cast<float>(Prio);
+  Log->logFloatValue(CurrentFeature, &Ret);
+
+  return static_cast<unsigned>(Prio);
+}
+
+#endif // #ifdef LLVM_HAVE_TF_API

diff --git a/llvm/lib/CodeGen/RegAllocPriorityAdvisor.cpp b/llvm/lib/CodeGen/RegAllocPriorityAdvisor.cpp
index 5ace3ddba8769..88fd47e600983 100644
--- a/llvm/lib/CodeGen/RegAllocPriorityAdvisor.cpp
+++ b/llvm/lib/CodeGen/RegAllocPriorityAdvisor.cpp
@@ -76,10 +76,14 @@ template <> Pass *llvm::callDefaultCtor<RegAllocPriorityAdvisorAnalysis>() {
     Ret = new DefaultPriorityAdvisorAnalysis(/*NotAsRequested*/ false);
     break;
   case RegAllocPriorityAdvisorAnalysis::AdvisorMode::Development:
-    // TODO: add implementation
+#if defined(LLVM_HAVE_TF_API)
+    Ret = createDevelopmentModePriorityAdvisor();
+#endif
     break;
   case RegAllocPriorityAdvisorAnalysis::AdvisorMode::Release:
-    // TODO: add implementation
+#if defined(LLVM_HAVE_TF_AOT_REGALLOCPRIORITYMODEL)
+    Ret = createReleaseModePriorityAdvisor();
+#endif
     break;
   }
   if (Ret)

diff --git a/llvm/test/CodeGen/MLRegalloc/Inputs/reference-prio-log-noml.txt b/llvm/test/CodeGen/MLRegalloc/Inputs/reference-prio-log-noml.txt
new file mode 100644
index 0000000000000..271480214546d
--- /dev/null
+++ b/llvm/test/CodeGen/MLRegalloc/Inputs/reference-prio-log-noml.txt
@@ -0,0 +1,361 @@
+fields {
+ key: "SyFgets"
+ value {
+ string_value: "feature_lists {
+ feature_list {
+ key: \"li_size\"  value {
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }  }  }
+ feature_list {
+ key: \"priority\"  value {
+ feature {  float_list {  value: 2.68435814e+09  }  }
+ feature {  float_list {  value: 2.68435814e+09  }  }
+ feature {  float_list {  value: 2.68435866e+09  }  }
+ feature {  float_list {  value: 2.68435789e+09  }  }
+ feature {  float_list {  value: 3.75810074e+09  }  }
+ feature {  float_list {  value: 3.7580969e+09  }  }
+ feature {  float_list {  value: 2.14748518e+09  }  }
+ feature {  float_list {  value: 2.14748493e+09  }  }
+ feature {  float_list {  value: 2.68435814e+09  }  }
+ feature {  float_list {  value: 2.14748493e+09  }  }
+ feature {  float_list {  value: 2.14748493e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.68435763e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.14748467e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.68435763e+09  }  }
+ feature {  float_list {  value: 2.68435763e+09  }  }
+ feature {  float_list {  value: 2.1474839e+09  }  }
+ feature {  float_list {  value: 2.1474839e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.1474839e+09  }  }
+ feature {  float_list {  value: 3.22122547e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.14748365e+09  }  }
+ feature {  float_list {  value: 2.14748493e+09  }  }
+ feature {  float_list {  value: 2.14748493e+09  }  }
+ feature {  float_list {  value: 2.68435814e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.68435584e+09  }  }
+ feature {  float_list {  value: 2.68435482e+09  }  }
+ feature {  float_list {  value: 2.68435482e+09  }  }
+ feature {  float_list {  value: 2.68435763e+09  }  }
+ feature {  float_list {  value: 2.68435584e+09  }  }
+ feature {  float_list {  value: 2.14748365e+09  }  }
+ feature {  float_list {  value: 3.75810074e+09  }  }
+ feature {  float_list {  value: 2.68435814e+09  }  }
+ feature {  float_list {  value: 3584  }  }
+ feature {  float_list {  value: 2.68435814e+09  }  }
+ feature {  float_list {  value: 3550  }  }
+ feature {  float_list {  value: 2.68435814e+09  }  }
+ feature {  float_list {  value: 3534  }  }
+ feature {  float_list {  value: 2974  }  }
+ feature {  float_list {  value: 2958  }  }
+ feature {  float_list {  value: 3.75809946e+09  }  }
+ feature {  float_list {  value: 2.68435866e+09  }  }
+ feature {  float_list {  value: 3998  }  }
+ feature {  float_list {  value: 3.75810074e+09  }  }
+ feature {  float_list {  value: 4336  }  }
+ feature {  float_list {  value: 2.6843584e+09  }  }
+ feature {  float_list {  value: 3.75809664e+09  }  }
+ feature {  float_list {  value: 2.68435482e+09  }  }
+ feature {  float_list {  value: 2.68435482e+09  }  }
+ feature {  float_list {  value: 2.68435814e+09  }  }
+ feature {  float_list {  value: 2.68435482e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.68435814e+09  }  }
+ feature {  float_list {  value: 2.14748493e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.68435814e+09  }  }
+ feature {  float_list {  value: 2.14748493e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }
+ feature {  float_list {  value: 2.68435456e+09  }  }  }  }
+ feature_list {
+ key: \"reward\"  value {
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 36.6412773  }  }  }  }
+ feature_list {
+ key: \"stage\"  value {
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }
+ feature {  int64_list {  value: 0  }  }  }  }
+ feature_list {
+ key: \"weight\"  value {
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }
+ feature {  float_list {  value: 0  }  }  }  } } "
+ }
+}

diff --git a/llvm/test/CodeGen/MLRegalloc/dev-mode-prio-logging.ll b/llvm/test/CodeGen/MLRegalloc/dev-mode-prio-logging.ll
new file mode 100644
index 0000000000000..05f059c87cda7
--- /dev/null
+++ b/llvm/test/CodeGen/MLRegalloc/dev-mode-prio-logging.ll
@@ -0,0 +1,30 @@
+; REQUIRES: have_tf_api
+; REQUIRES: x86_64-linux
+;
+; Check that we log correctly, both with a learned policy and with the default policy.
+;
+; RUN: llc -mtriple=x86_64-linux-unknown -regalloc=greedy -regalloc-enable-priority-advisor=development \
+; RUN:   -regalloc-priority-training-log=%t1 -tfutils-text-log < %S/Inputs/input.ll
+; RUN: sed -i 's/ \+/ /g' %t1
+; RUN: sed -i 's/\\n key:/\n key:/g' %t1
+; RUN: sed -i 's/\\n feature/\n feature/g' %t1
+; RUN: sed -i 's/\\n/ /g' %t1
+; RUN: FileCheck --input-file %t1 %s --check-prefixes=CHECK,NOML
+; RUN: diff %t1 %S/Inputs/reference-prio-log-noml.txt
+
+; RUN: rm -rf %t && mkdir %t
+; RUN: %python %S/../../../lib/Analysis/models/gen-regalloc-priority-test-model.py %t_savedmodel
+; RUN: %python %S/../../../lib/Analysis/models/saved-model-to-tflite.py %t_savedmodel %t
+; RUN: llc -mtriple=x86_64-linux-unknown -regalloc=greedy -regalloc-enable-priority-advisor=development \
+; RUN:   -regalloc-priority-training-log=%t2 -tfutils-text-log -regalloc-priority-model=%t < %S/Inputs/input.ll
+; RUN: sed -i 's/ \+/ /g' %t2
+; RUN: sed -i 's/\\n key:/\n key:/g' %t2
+; RUN: sed -i 's/\\n feature/\n feature/g' %t2
+; RUN: sed -i 's/\\n/ /g' %t2
+; RUN: FileCheck --input-file %t2 %s --check-prefixes=CHECK,ML
+
+; CHECK-NOT: nan
+; CHECK-LABEL: key: \"priority\"
+; NOML-NEXT: feature {  float_list {  value: 2.68435814e+09  }  }
+; ML-NEXT: feature {  float_list {  value: 3551  }  }
+; CHECK-LABEL: key: \"reward\"


        

