[llvm] 8ab2353 - [NFC][TFUtils] also include output specs lookup logic in loadOutputSpecs

Mircea Trofin via llvm-commits llvm-commits at lists.llvm.org
Wed Nov 18 21:20:41 PST 2020


Author: Mircea Trofin
Date: 2020-11-18T21:20:21-08:00
New Revision: 8ab2353a4c3dc430b28190b98b16418ef8ffce30

URL: https://github.com/llvm/llvm-project/commit/8ab2353a4c3dc430b28190b98b16418ef8ffce30
DIFF: https://github.com/llvm/llvm-project/commit/8ab2353a4c3dc430b28190b98b16418ef8ffce30.diff

LOG: [NFC][TFUtils] also include output specs lookup logic in loadOutputSpecs

The lookup logic is also reusable.

Also refactored the API to return the loaded vector - this makes it
clearer what state the vector is in when an error occurs (as it won't be
returned).

Differential Revision: https://reviews.llvm.org/D91759

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/Utils/TFUtils.h
    llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
    llvm/lib/Analysis/TFUtils.cpp

Removed: 
    


################################################################################
diff  --git a/llvm/include/llvm/Analysis/Utils/TFUtils.h b/llvm/include/llvm/Analysis/Utils/TFUtils.h
index 16aef19a7fb5..ea6bc2cf19ee 100644
--- a/llvm/include/llvm/Analysis/Utils/TFUtils.h
+++ b/llvm/include/llvm/Analysis/Utils/TFUtils.h
@@ -105,9 +105,14 @@ struct LoggedFeatureSpec {
   Optional<std::string> LoggingName;
 };
 
-bool loadOutputSpecs(LLVMContext &Ctx, StringRef FileName,
-                     StringRef ExpectedDecisionName,
-                     std::vector<LoggedFeatureSpec> &Ret);
+/// Load the output specs. If SpecFileOverride is not empty, that path is used.
+/// Otherwise, the file is assumed to be called 'output_spec.json' and be found
+/// under ModelPath (the model directory).
+/// The first output tensor name must match ExpectedDecisionName.
+/// In case of error, the return is None and the error is logged.
+Optional<std::vector<LoggedFeatureSpec>>
+loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
+                StringRef ModelPath, StringRef SpecFileOverride = StringRef());
 
 /// Logging utility - given an ordered specification of features, and assuming
 /// a scalar reward, allow logging feature values and rewards, and then print

diff  --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
index 41aa9a0fe2bf..37f43580c462 100644
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -21,7 +21,6 @@
 #include "llvm/IR/LLVMContext.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/ManagedStatic.h"
-#include "llvm/Support/Path.h"
 
 #include <vector>
 
@@ -472,14 +471,10 @@ ModelUnderTrainingRunner::ModelUnderTrainingRunner(LLVMContext &Ctx,
         TensorSpec::createSpec<int64_t>(TFFeedPrefix + FeatureNameMap[I], {1}));
   InputSpecs.insert(InputSpecs.end(), TrainingOnlyFeatures.begin(),
                     TrainingOnlyFeatures.end());
-  SmallVector<char, 128> OutputSpecsPath;
-  StringRef OutputSpecPath = TFOutputSpecOverride;
-  if (OutputSpecPath.empty()) {
-    llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json");
-    OutputSpecPath = {OutputSpecsPath.data(), OutputSpecsPath.size()};
-  }
-
-  if (!loadOutputSpecs(Ctx, OutputSpecPath, DecisionName, OutputSpecs))
+  if (auto MaybeOutSpecs =
+          loadOutputSpecs(Ctx, DecisionName, ModelPath, TFOutputSpecOverride))
+    OutputSpecs = std::move(*MaybeOutSpecs);
+  else
     return;
 
   Evaluator = std::make_unique<TFModelEvaluator>(

diff  --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp
index 52cfe0b43366..1377cac217ab 100644
--- a/llvm/lib/Analysis/TFUtils.cpp
+++ b/llvm/lib/Analysis/TFUtils.cpp
@@ -19,6 +19,7 @@
 #include "llvm/Support/JSON.h"
 #include "llvm/Support/ManagedStatic.h"
 #include "llvm/Support/MemoryBuffer.h"
+#include "llvm/Support/Path.h"
 #include "llvm/Support/raw_ostream.h"
 
 #include "tensorflow/c/c_api.h"
@@ -216,27 +217,34 @@ Optional<TensorSpec> getTensorSpecFromJSON(LLVMContext &Ctx,
   return None;
 }
 
-bool loadOutputSpecs(LLVMContext &Ctx, StringRef FileName,
-                     StringRef ExpectedDecisionName,
-                     std::vector<LoggedFeatureSpec> &Ret) {
+Optional<std::vector<LoggedFeatureSpec>>
+loadOutputSpecs(LLVMContext &Ctx, StringRef ExpectedDecisionName,
+                StringRef ModelPath, StringRef SpecFileOverride) {
+  SmallVector<char, 128> OutputSpecsPath;
+  StringRef FileName = SpecFileOverride;
+  if (FileName.empty()) {
+    llvm::sys::path::append(OutputSpecsPath, ModelPath, "output_spec.json");
+    FileName = {OutputSpecsPath.data(), OutputSpecsPath.size()};
+  }
+
   auto BufferOrError = MemoryBuffer::getFileOrSTDIN(FileName);
   if (!BufferOrError) {
     Ctx.emitError("Error opening output specs file: " + FileName + " : " +
                   BufferOrError.getError().message());
-    return false;
+    return None;
   }
   auto ParsedJSONValues = json::parse(BufferOrError.get()->getBuffer());
   if (!ParsedJSONValues) {
     Ctx.emitError("Could not parse specs file: " + FileName);
-    return false;
+    return None;
   }
   auto ValuesArray = ParsedJSONValues->getAsArray();
   if (!ValuesArray) {
     Ctx.emitError("Expected an array of {tensor_spec:<TensorSpec>, "
                   "logging_name:<name>} dictionaries");
-    return false;
+    return None;
   }
-
+  std::vector<LoggedFeatureSpec> Ret;
   for (const auto &Value : *ValuesArray)
     if (const auto *Obj = Value.getAsObject())
       if (const auto *SpecPart = Obj->get("tensor_spec"))
@@ -249,7 +257,7 @@ bool loadOutputSpecs(LLVMContext &Ctx, StringRef FileName,
                   "Only int64, int32, and float tensors are supported. "
                   "Found unsupported type for tensor named " +
                   TensorSpec->name());
-              return false;
+              return None;
             }
             Ret.push_back({*TensorSpec, LoggingName->str()});
           }
@@ -261,15 +269,15 @@ bool loadOutputSpecs(LLVMContext &Ctx, StringRef FileName,
         "with a json object describing a TensorSpec; and a 'logging_name' key, "
         "which is a string to use as name when logging this tensor in the "
         "training log.");
-    return false;
+    return None;
   }
   if (Ret.empty() || *Ret[0].LoggingName != ExpectedDecisionName) {
     Ctx.emitError("The first output spec must describe the decision tensor, "
                   "and must have the logging_name " +
                   StringRef(ExpectedDecisionName));
-    return false;
+    return None;
   }
-  return true;
+  return Ret;
 }
 
 class TFModelEvaluatorImpl {


        


More information about the llvm-commits mailing list