[llvm] 3a462d8 - [mlgo] drop the prefix `_` in `_model_selector`

Mircea Trofin via llvm-commits llvm-commits at lists.llvm.org
Wed Jun 26 15:47:32 PDT 2024


Author: Mircea Trofin
Date: 2024-06-26T15:47:23-07:00
New Revision: 3a462d89221a307c037a653bc9e0c9190f79c1b9

URL: https://github.com/llvm/llvm-project/commit/3a462d89221a307c037a653bc9e0c9190f79c1b9
DIFF: https://github.com/llvm/llvm-project/commit/3a462d89221a307c037a653bc9e0c9190f79c1b9.diff

LOG: [mlgo] drop the prefix `_` in `_model_selector`

`_` upsets the saved model freezer (assumptions about python naming).

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
    llvm/unittests/Analysis/MLModelRunnerTest.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h b/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
index 9fb4ff4ec4013..641036d8fdb4e 100644
--- a/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
+++ b/llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
@@ -71,7 +71,7 @@ class ReleaseModeModelRunner final : public MLModelRunner {
     //   of the model selector to {high, low}
     bool InputIsPresent = true;
     populateTensor(InputSpec.size(),
-                   TensorSpec::createSpec<uint64_t>("_model_selector", {2}),
+                   TensorSpec::createSpec<uint64_t>("model_selector", {2}),
                    Options.FeedPrefix, InputIsPresent);
 
     // If we hit the "report an error" cases outlined above, continue with the
@@ -80,13 +80,13 @@ class ReleaseModeModelRunner final : public MLModelRunner {
     if (Options.ModelSelector.empty() && InputIsPresent)
       Ctx.emitError(
           "A model selector was not specified but the underlying model "
-          "requires selecting one because it exposes a _model_selector input");
+          "requires selecting one because it exposes a model_selector input");
     uint64_t High = 0;
     uint64_t Low = 0;
     if (!Options.ModelSelector.empty()) {
       if (!InputIsPresent)
         Ctx.emitError("A model selector was specified but the underlying model "
-                      "does not expose a _model_selector input");
+                      "does not expose a model_selector input");
       const auto Hash = MD5::hash(arrayRefFromStringRef(Options.ModelSelector));
       High = Hash.high();
       Low = Hash.low();
@@ -94,7 +94,7 @@ class ReleaseModeModelRunner final : public MLModelRunner {
     getTensor<uint64_t>(InputSpec.size())[0] = High;
     getTensor<uint64_t>(InputSpec.size())[1] = Low;
     // At this point, the model selector is set up. If the user didn't provide
-    // one, but the model has a _model_selector, it'll be set to (0, 0) which
+    // one, but the model has a model_selector, it'll be set to (0, 0) which
     // the composite model should treat as error as part of its implementation
     // (but that should only matter if there is a custom handler that doesn't
     // exit on error)

diff --git a/llvm/unittests/Analysis/MLModelRunnerTest.cpp b/llvm/unittests/Analysis/MLModelRunnerTest.cpp
index 53ad2fc5c4b08..87b37f81163a4 100644
--- a/llvm/unittests/Analysis/MLModelRunnerTest.cpp
+++ b/llvm/unittests/Analysis/MLModelRunnerTest.cpp
@@ -102,7 +102,7 @@ class ComposedAOTModel final {
 public:
   ComposedAOTModel() = default;
   int LookupArgIndex(const std::string &Name) {
-    if (Name == "prefix__model_selector")
+    if (Name == "prefix_model_selector")
       return 2;
     return getModel()->LookupArgIndex(Name);
   }
@@ -201,7 +201,7 @@ TEST(ReleaseModelRunner, ModelSelectorNoInputFeaturePresent) {
   EXPECT_DEATH(std::make_unique<ReleaseModeModelRunner<AdditionAOTModel>>(
                    Ctx, Inputs, "", makeOptions().setModelSelector(M2Selector)),
                "A model selector was specified but the underlying model does "
-               "not expose a _model_selector input");
+               "not expose a model_selector input");
 }
 
 TEST(ReleaseModelRunner, ModelSelectorNoSelectorGiven) {
@@ -212,10 +212,10 @@ TEST(ReleaseModelRunner, ModelSelectorNoSelectorGiven) {
       std::make_unique<ReleaseModeModelRunner<ComposedAOTModel>>(
           Ctx, Inputs, "", makeOptions()),
       "A model selector was not specified but the underlying model requires "
-      "selecting one because it exposes a _model_selector input");
+      "selecting one because it exposes a model_selector input");
 }
 
-// Test that we correctly set up the _model_selector tensor value. We are only
+// Test that we correctly set up the model_selector tensor value. We are only
 // responsbile for what happens if the user doesn't specify a value (but the
 // model supports the feature), or if the user specifies one, and we correctly
 // populate the tensor, and do so upfront (in case the model implementation


        


More information about the llvm-commits mailing list