[llvm] 4f763b2 - [llvm][NFC] Hide the tensorflow dependency from headers.

Mircea Trofin via llvm-commits llvm-commits at lists.llvm.org
Tue Jul 14 21:15:00 PDT 2020


Author: Mircea Trofin
Date: 2020-07-14T21:14:11-07:00
New Revision: 4f763b2172c591ab253c8489fcd53af0c544d5cb

URL: https://github.com/llvm/llvm-project/commit/4f763b2172c591ab253c8489fcd53af0c544d5cb
DIFF: https://github.com/llvm/llvm-project/commit/4f763b2172c591ab253c8489fcd53af0c544d5cb.diff

LOG: [llvm][NFC] Hide the tensorflow dependency from headers.

Summary:
This change avoids exposing tensorflow types when including TFUtils.h.
They are just an implementation detail, and don't need to be used
directly when implementing an analysis requiring ML model evaluation.

The TFUtils APIs, while generically typed, are still not exposed unless
the tensorflow C library is present, as they currently have no use
otherwise.
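
For illustration, a minimal sketch of how a caller might use the new,
generically typed API after this change (the model path, node names, tensor
shape, and output element type below are hypothetical; "serve" is the
conventional SavedModel tag):

    #include "llvm/ADT/Optional.h"
    #include "llvm/Analysis/Utils/TFUtils.h"
    #include <cstdint>

    llvm::Optional<float> runModel() {
      using namespace llvm;
      // Hypothetical SavedModel with one int32 input and one float output.
      TFModelEvaluator Evaluator("path/to/saved_model", {"input_node"},
                                 {"output_node"}, "serve");
      if (!Evaluator.isValid())
        return None;
      // Allocate input 0 as a 1x4 int32 tensor and fill it in place.
      Evaluator.initInput<int32_t>(0, {1, 4});
      int32_t *In = Evaluator.getInput<int32_t>(0);
      for (int I = 0; I < 4; ++I)
        In[I] = I;
      // evaluate() returns None on failure and invalidates the evaluator.
      // Note: no TF_Tensor or TF_DeleteTensor appears at the call site.
      auto ER = Evaluator.evaluate();
      if (!ER)
        return None;
      // Output values must be read before ER is destroyed.
      return *ER->getTensorValue<float>(0);
    }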

Reviewers: mehdi_amini, davidxl

Subscribers: hiraditya, llvm-commits

Tags: #llvm

Differential Revision: https://reviews.llvm.org/D83843

Added: 
    

Modified: 
    llvm/include/llvm/Analysis/Utils/TFUtils.h
    llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
    llvm/lib/Analysis/TFUtils.cpp
    llvm/unittests/Analysis/TFUtilsTest.cpp

Removed: 
    


################################################################################
diff --git a/llvm/include/llvm/Analysis/Utils/TFUtils.h b/llvm/include/llvm/Analysis/Utils/TFUtils.h
index 118081652e9e..2ab2c7a57d94 100644
--- a/llvm/include/llvm/Analysis/Utils/TFUtils.h
+++ b/llvm/include/llvm/Analysis/Utils/TFUtils.h
@@ -12,7 +12,6 @@
 #include "llvm/Config/config.h"
 
 #ifdef LLVM_HAVE_TF_API
-#include "tensorflow/c/c_api.h"
 #include "llvm/IR/LLVMContext.h"
 
 #include <memory>
@@ -31,51 +30,35 @@ namespace llvm {
 /// - set input values by using getInput to get each input tensor, and then
 ///   setting internal scalars, for all dimensions (tensors are row-major:
 ///   https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/c/c_api.h#L205)
-/// - prepare an output vector of TF_Output* type, with the correct number of
-/// outputs (i.e. same as OutputNames). Initialize the vector with nullptr
-/// values.
 /// - call evaluate. The input tensors' values are not consumed after this, and
 ///   may still be read.
 /// - use the outputs in the output vector
-/// - deallocate each output tensor in the output vector, using TF_DeleteTensor.
+class TFModelEvaluatorImpl;
+class EvaluationResultImpl;
+
 class TFModelEvaluator final {
 public:
   /// The result of a model evaluation. Handles the lifetime of the output
-  /// TF_Tensor objects, which means that their values need to be used before
+  /// tensors, which means that their values need to be used before
   /// the EvaluationResult's dtor is called.
   class EvaluationResult {
   public:
-    ~EvaluationResult() {
-      for (auto *P : Output)
-        if (P)
-          TF_DeleteTensor(P);
-    }
-
     EvaluationResult(const EvaluationResult &) = delete;
-    EvaluationResult(EvaluationResult &&Other)
-        : OutputSize(Other.OutputSize), Output(std::move(Other.Output)) {
-      Other.Output.clear();
-    };
+    EvaluationResult(EvaluationResult &&Other);
+    ~EvaluationResult();
 
     /// Get a pointer to the first element of the tensor at Index.
     template <typename T> T *getTensorValue(size_t Index) {
-      return static_cast<T *>(TF_TensorData(Output[Index]));
+      return static_cast<T *>(getUntypedTensorValue(Index));
     }
 
   private:
     friend class TFModelEvaluator;
-    EvaluationResult(size_t OutputSize)
-        : OutputSize(OutputSize), Output(OutputSize){};
-
-    const size_t OutputSize;
-    std::vector<TF_Tensor *> Output;
+    EvaluationResult(std::unique_ptr<EvaluationResultImpl> Impl);
+    void *getUntypedTensorValue(size_t Index);
+    std::unique_ptr<EvaluationResultImpl> Impl;
   };
 
-  using TFGraphPtr = std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)>;
-  using TFSessionOptionsPtr =
-      std::unique_ptr<TF_SessionOptions, decltype(&TF_DeleteSessionOptions)>;
-  using TFStatusPtr = std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;
-
   TFModelEvaluator(StringRef SavedModelPath,
                    const std::vector<std::string> &InputNames,
                    const std::vector<std::string> &OutputNames,
@@ -87,53 +70,45 @@ class TFModelEvaluator final {
   /// Evaluate the model, assuming it is valid. Returns None if the evaluation
   /// fails or the model is invalid, or an EvaluationResult otherwise. The
   /// inputs are assumed to have been already provided via getInput(). When
-  /// returning None, it also marks the object invalid. Pass an Output vector
-  /// with the same size as OutputNames, but with nullptr values. evaluate()
-  /// will populate it with tensors, matching in index the corresponding
-  /// OutputNames. The caller is responsible for the deallocation of those
-  /// tensors, using TF_DeleteTensor.
+  /// returning None, it also invalidates this object.
   Optional<EvaluationResult> evaluate();
 
-  /// Provides access to the input vector. It is already dimensioned correctly,
-  /// but the values need to be allocated by the user.
-  std::vector<TF_Tensor *> &getInput() { return Input; }
+  /// Provides access to the input vector.
+  template <typename T> T *getInput(size_t Index) {
+    return static_cast<T *>(getUntypedInput(Index));
+  }
 
   /// Returns true if the tensorflow model was loaded successfully, false
   /// otherwise.
-  bool isValid() const { return !!Session; }
+  bool isValid() const { return !!Impl; }
 
-  /// Initialize the input at Index as a tensor of the given type and dimensions
-  void initInput(int Index, TF_DataType Type,
-                 const std::vector<int64_t> &Dimensions);
+  /// Initialize the input at Index as a tensor of the given type and
+  /// dimensions.
+  template <typename T>
+  void initInput(size_t Index, const std::vector<int64_t> &Dimensions) {
+    return initInput(Index, getModelTypeIndex<T>(), Dimensions);
+  }
 
 private:
-  /// The objects necessary for carrying out an evaluation of the SavedModel.
-  /// They are expensive to set up, and we maintain them across all the
-  /// evaluations of the model.
-  TF_Session *Session = nullptr;
-  TFGraphPtr Graph;
-  TFSessionOptionsPtr Options;
-
-  /// The specification of the input nodes.
-  std::vector<TF_Output> InputFeed;
-
-  /// The input tensors. They must match by index of the corresponding InputFeed
-  /// value. We set up the tensors once and just mutate their scalars before
-  /// each evaluation. The input tensors keep their value after an evaluation.
-  std::vector<TF_Tensor *> Input;
-
-  /// The specification of the output nodes. When evaluating, the tensors in the
-  /// output tensor vector must match by index the corresponding element in the
-  /// OutputFeed.
-  std::vector<TF_Output> OutputFeed;
-
-  /// Reusable utility for deleting the session.
-  void deleteSession();
-
-  /// Reusable utility for ensuring we can bind the requested Name to a node in
-  /// the SavedModel Graph.
-  bool checkReportAndReset(const TF_Output &Output, StringRef Name);
+  void *getUntypedInput(size_t Index);
+  template <typename T> int getModelTypeIndex();
+  void initInput(size_t Index, int TypeIndex,
+                 const std::vector<int64_t> &Dimensions);
+
+  std::unique_ptr<TFModelEvaluatorImpl> Impl;
 };
+
+template <> int TFModelEvaluator::getModelTypeIndex<float>();
+template <> int TFModelEvaluator::getModelTypeIndex<double>();
+template <> int TFModelEvaluator::getModelTypeIndex<int8_t>();
+template <> int TFModelEvaluator::getModelTypeIndex<uint8_t>();
+template <> int TFModelEvaluator::getModelTypeIndex<int16_t>();
+template <> int TFModelEvaluator::getModelTypeIndex<uint16_t>();
+template <> int TFModelEvaluator::getModelTypeIndex<int32_t>();
+template <> int TFModelEvaluator::getModelTypeIndex<uint32_t>();
+template <> int TFModelEvaluator::getModelTypeIndex<int64_t>();
+template <> int TFModelEvaluator::getModelTypeIndex<uint64_t>();
+
 } // namespace llvm
 
 #endif // LLVM_HAVE_TF_API
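
The header above is now a textbook pimpl (pointer-to-implementation) setup:
TFModelEvaluatorImpl and EvaluationResultImpl are only forward-declared, so
the std::unique_ptr members force the destructors and the move constructor
to be defined out of line in TFUtils.cpp, where the Impl types are complete
(hence the seemingly empty ~EvaluationResult() and ~TFModelEvaluator()
definitions further down). A generic sketch of that constraint, with
illustrative names not taken from the patch:

    #include <memory>

    // widget.h -- Impl stays incomplete here, so heavy third-party
    // headers never leak into the public interface.
    class Widget {
    public:
      Widget();
      ~Widget(); // must be out-of-line: ~unique_ptr needs a complete Impl
    private:
      class Impl;
      std::unique_ptr<Impl> Pimpl;
    };

    // widget.cpp -- third-party types live only here.
    class Widget::Impl { /* e.g. TF_Session, TF_Graph, ... */ };
    Widget::Widget() : Pimpl(std::make_unique<Impl>()) {}
    Widget::~Widget() = default; // Impl is complete at this point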

diff --git a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp b/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
index 1d1952ae6cbb..ebc59879d357 100644
--- a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
+++ b/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp
@@ -256,7 +256,7 @@ InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis() {
       1, static_cast<int64_t>(
              IRToNativeSizeLearning::FunctionFeatures::FeatureCount)};
 
-  Evaluator->initInput(0, TF_INT32, Dim);
+  Evaluator->initInput<int32_t>(0, Dim);
 }
 
 InlineSizeEstimatorAnalysis::Result
@@ -266,7 +266,7 @@ InlineSizeEstimatorAnalysis::run(const Function &F,
     return None;
   auto Features = IRToNativeSizeLearning::getFunctionFeatures(
       const_cast<Function &>(F), FAM);
-  int32_t *V = static_cast<int32_t *>(TF_TensorData(Evaluator->getInput()[0]));
+  int32_t *V = Evaluator->getInput<int32_t>(0);
   Features.fillTensor(V);
   auto ER = Evaluator->evaluate();
   if (!ER)

diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp
index 6cd5b5c9b4ea..19e6d626e238 100644
--- a/llvm/lib/Analysis/TFUtils.cpp
+++ b/llvm/lib/Analysis/TFUtils.cpp
@@ -17,6 +17,7 @@
 #include "llvm/Support/ManagedStatic.h"
 #include "llvm/Support/raw_ostream.h"
 
+#include "tensorflow/c/c_api.h"
 #include "tensorflow/c/c_api_experimental.h"
 
 #include <cassert>
@@ -25,6 +26,11 @@ using namespace llvm;
 
 namespace {
 
+using TFGraphPtr = std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)>;
+using TFSessionOptionsPtr =
+    std::unique_ptr<TF_SessionOptions, decltype(&TF_DeleteSessionOptions)>;
+using TFStatusPtr = std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;
+
 struct TFInitializer {
   TFInitializer() {
     assert(!IsInitialized && "TFInitialized should be called only once");
@@ -41,24 +47,96 @@ llvm::ManagedStatic<TFInitializer> TFLibInitializer;
 
 bool ensureInitTF() { return TFLibInitializer->IsInitialized; }
 
-TFModelEvaluator::TFGraphPtr createTFGraph() {
-  return TFModelEvaluator::TFGraphPtr(TF_NewGraph(), &TF_DeleteGraph);
+TFGraphPtr createTFGraph() {
+  return TFGraphPtr(TF_NewGraph(), &TF_DeleteGraph);
 }
 
-TFModelEvaluator::TFStatusPtr createTFStatus() {
-  return TFModelEvaluator::TFStatusPtr(TF_NewStatus(), &TF_DeleteStatus);
+TFStatusPtr createTFStatus() {
+  return TFStatusPtr(TF_NewStatus(), &TF_DeleteStatus);
 }
 
-TFModelEvaluator::TFSessionOptionsPtr createTFSessionOptions() {
-  return TFModelEvaluator::TFSessionOptionsPtr(TF_NewSessionOptions(),
-                                               &TF_DeleteSessionOptions);
+TFSessionOptionsPtr createTFSessionOptions() {
+  return TFSessionOptionsPtr(TF_NewSessionOptions(), &TF_DeleteSessionOptions);
 }
 } // namespace
 
-TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
-                                   const std::vector<std::string> &InputNames,
-                                   const std::vector<std::string> &OutputNames,
-                                   const char *Tags)
+namespace llvm {
+class EvaluationResultImpl {
+public:
+  EvaluationResultImpl(size_t OutputSize)
+      : OutputSize(OutputSize), Output(OutputSize){};
+
+  ~EvaluationResultImpl() {
+    for (auto *P : Output)
+      if (P)
+        TF_DeleteTensor(P);
+  }
+
+  EvaluationResultImpl(const EvaluationResultImpl &) = delete;
+  EvaluationResultImpl(EvaluationResultImpl &&Other) = delete;
+  std::vector<TF_Tensor *> &getOutput() { return Output; }
+
+private:
+  const size_t OutputSize;
+  std::vector<TF_Tensor *> Output;
+};
+
+class TFModelEvaluatorImpl {
+public:
+  TFModelEvaluatorImpl(StringRef SavedModelPath,
+                       const std::vector<std::string> &InputNames,
+                       const std::vector<std::string> &OutputNames,
+                       const char *Tags);
+
+  bool isValid() const { return IsValid; }
+  size_t OutputSize() const { return OutputFeed.size(); }
+
+  void evaluate(TF_Tensor **Output, TF_Status *Status) {
+    TF_SessionRun(Session, nullptr, InputFeed.data(), Input.data(),
+                  Input.size(), OutputFeed.data(), Output, OutputFeed.size(),
+                  nullptr, 0, nullptr, Status);
+  }
+
+  void initInput(size_t Index, TF_DataType Type,
+                 const std::vector<int64_t> &Dimensions);
+  const std::vector<TF_Tensor *> &getInput() const { return Input; }
+
+  ~TFModelEvaluatorImpl();
+
+private:
+  /// The objects necessary for carrying out an evaluation of the SavedModel.
+  /// They are expensive to set up, and we maintain them across all the
+  /// evaluations of the model.
+  TF_Session *Session = nullptr;
+  TFGraphPtr Graph;
+  TFSessionOptionsPtr Options;
+
+  /// The specification of the input nodes.
+  std::vector<TF_Output> InputFeed;
+
+  /// The input tensors. They must match by index of the corresponding InputFeed
+  /// value. We set up the tensors once and just mutate their scalars before
+  /// each evaluation. The input tensors keep their value after an evaluation.
+  std::vector<TF_Tensor *> Input;
+
+  /// The specification of the output nodes. When evaluating, the tensors in the
+  /// output tensor vector must match by index the corresponding element in the
+  /// OutputFeed.
+  std::vector<TF_Output> OutputFeed;
+
+  void invalidate() { IsValid = false; }
+
+  bool IsValid = true;
+
+  /// Reusable utility for ensuring we can bind the requested Name to a node in
+  /// the SavedModel Graph.
+  bool checkReportAndInvalidate(const TF_Output &Output, StringRef Name);
+};
+} // namespace llvm
+
+TFModelEvaluatorImpl::TFModelEvaluatorImpl(
+    StringRef SavedModelPath, const std::vector<std::string> &InputNames,
+    const std::vector<std::string> &OutputNames, const char *Tags)
     : Graph(createTFGraph()), Options(createTFSessionOptions()),
       InputFeed(InputNames.size()), Input(InputNames.size()),
       OutputFeed(OutputNames.size()) {
@@ -73,39 +151,36 @@ TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
                                          Graph.get(), nullptr, Status.get());
   if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
     errs() << TF_Message(Status.get());
-    deleteSession();
+    invalidate();
   }
   for (size_t I = 0; I < InputNames.size(); ++I) {
     InputFeed[I] = {
         TF_GraphOperationByName(Graph.get(), (InputNames[I]).c_str()), 0};
-    if (!checkReportAndReset(InputFeed[I], InputNames[I]))
+    if (!checkReportAndInvalidate(InputFeed[I], InputNames[I]))
       return;
   }
   for (size_t I = 0; I < OutputNames.size(); ++I) {
     OutputFeed[I] = {
         TF_GraphOperationByName(Graph.get(), (OutputNames[I]).c_str()), 0};
-    if (!checkReportAndReset(OutputFeed[I], OutputNames[I]))
+    if (!checkReportAndInvalidate(OutputFeed[I], OutputNames[I]))
       return;
   }
 }
 
-TFModelEvaluator::~TFModelEvaluator() {
+TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,
+                                   const std::vector<std::string> &InputNames,
+                                   const std::vector<std::string> &OutputNames,
+                                   const char *Tags)
+    : Impl(new TFModelEvaluatorImpl(SavedModelPath, InputNames, OutputNames,
+                                    Tags)) {
+  if (!Impl->isValid())
+    Impl.reset();
+}
+
+TFModelEvaluatorImpl::~TFModelEvaluatorImpl() {
   for (auto *T : Input) {
     TF_DeleteTensor(T);
   }
-  deleteSession();
-}
-
-bool TFModelEvaluator::checkReportAndReset(const TF_Output &Output,
-                                           StringRef Name) {
-  if (Output.oper)
-    return true;
-  errs() << "Could not find TF_Output named: " + Name;
-  deleteSession();
-  return false;
-}
-
-void TFModelEvaluator::deleteSession() {
   if (Session == nullptr)
     return;
   auto Status = createTFStatus();
@@ -115,24 +190,32 @@ void TFModelEvaluator::deleteSession() {
     errs() << "Could not delete TF session";
 }
 
+bool TFModelEvaluatorImpl::checkReportAndInvalidate(const TF_Output &Output,
+                                                    StringRef Name) {
+  if (Output.oper)
+    return true;
+  errs() << "Could not find TF_Output named: " + Name;
+  IsValid = false;
+  return IsValid;
+}
+
 Optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {
   if (!isValid())
     return None;
-  EvaluationResult Ret(OutputFeed.size());
+  std::unique_ptr<EvaluationResultImpl> Ret =
+      std::make_unique<EvaluationResultImpl>(Impl->OutputSize());
   auto Status = createTFStatus();
-  TF_SessionRun(Session, nullptr, InputFeed.data(), Input.data(), Input.size(),
-                OutputFeed.data(), Ret.Output.data(), Ret.Output.size(),
-                nullptr, 0, nullptr, Status.get());
+  Impl->evaluate(Ret->getOutput().data(), Status.get());
   if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {
     errs() << TF_Message(Status.get());
-    deleteSession();
+    Impl.reset();
     return None;
   }
-  return Ret;
+  return EvaluationResult(std::move(Ret));
 }
 
-void TFModelEvaluator::initInput(int Index, TF_DataType Type,
-                                 const std::vector<int64_t> &Dimensions) {
+void TFModelEvaluatorImpl::initInput(size_t Index, TF_DataType Type,
+                                     const std::vector<int64_t> &Dimensions) {
   int64_t TotalSize = TF_DataTypeSize(Type);
   for (auto &D : Dimensions)
     TotalSize *= D;
@@ -140,4 +223,67 @@ void TFModelEvaluator::initInput(int Index, TF_DataType Type,
   Input[Index] =
       TF_AllocateTensor(Type, Dimensions.data(), Dimensions.size(), TotalSize);
   std::memset(TF_TensorData(Input[Index]), 0, TotalSize);
-}
\ No newline at end of file
+}
+
+void *TFModelEvaluator::getUntypedInput(size_t Index) {
+  return TF_TensorData(Impl->getInput()[Index]);
+}
+
+TFModelEvaluator::EvaluationResult::EvaluationResult(
+    std::unique_ptr<EvaluationResultImpl> Impl)
+    : Impl(std::move(Impl)) {}
+
+TFModelEvaluator::EvaluationResult::EvaluationResult(EvaluationResult &&Other)
+    : Impl(std::move(Other.Impl)) {}
+
+void *TFModelEvaluator::EvaluationResult::getUntypedTensorValue(size_t Index) {
+  return TF_TensorData(Impl->getOutput()[Index]);
+}
+
+void TFModelEvaluator::initInput(size_t Index, int TypeIndex,
+                                 const std::vector<int64_t> &Dimensions) {
+  Impl->initInput(Index, static_cast<TF_DataType>(TypeIndex), Dimensions);
+}
+
+template <> int TFModelEvaluator::getModelTypeIndex<float>() {
+  return TF_FLOAT;
+}
+
+template <> int TFModelEvaluator::getModelTypeIndex<double>() {
+  return TF_DOUBLE;
+}
+
+template <> int TFModelEvaluator::getModelTypeIndex<int8_t>() {
+  return TF_INT8;
+}
+
+template <> int TFModelEvaluator::getModelTypeIndex<uint8_t>() {
+  return TF_UINT8;
+}
+
+template <> int TFModelEvaluator::getModelTypeIndex<int16_t>() {
+  return TF_INT16;
+}
+
+template <> int TFModelEvaluator::getModelTypeIndex<uint16_t>() {
+  return TF_UINT16;
+}
+
+template <> int TFModelEvaluator::getModelTypeIndex<int32_t>() {
+  return TF_INT32;
+}
+
+template <> int TFModelEvaluator::getModelTypeIndex<uint32_t>() {
+  return TF_UINT32;
+}
+
+template <> int TFModelEvaluator::getModelTypeIndex<int64_t>() {
+  return TF_INT64;
+}
+
+template <> int TFModelEvaluator::getModelTypeIndex<uint64_t>() {
+  return TF_UINT64;
+}
+
+TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
+TFModelEvaluator::~TFModelEvaluator() {}
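
A side note on the getModelTypeIndex<T> specializations above: they are the
other half of the decoupling. The header declares the specializations as
returning a plain int, and only TFUtils.cpp, which includes c_api.h, maps
them onto TF_DataType. Because the primary template is declared but never
defined, requesting an unsupported element type fails at link time. A
minimal sketch of the same pattern (the ThirdPartyType enum and the
header/source split are illustrative, not part of the patch):

    #include <cstdint>

    // api.h -- no third-party include; only specialization declarations
    // exist, so getTypeTag<T>() for an unsupported T fails to link.
    template <typename T> int getTypeTag();
    template <> int getTypeTag<float>();
    template <> int getTypeTag<int32_t>();

    // api.cpp -- the only file that sees the real enum.
    enum ThirdPartyType { TP_FLOAT = 1, TP_INT32 = 3 }; // stand-in values
    template <> int getTypeTag<float>() { return TP_FLOAT; }
    template <> int getTypeTag<int32_t>() { return TP_INT32; }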

diff --git a/llvm/unittests/Analysis/TFUtilsTest.cpp b/llvm/unittests/Analysis/TFUtilsTest.cpp
index 4c775c4c0b93..1e54f1885b2c 100644
--- a/llvm/unittests/Analysis/TFUtilsTest.cpp
+++ b/llvm/unittests/Analysis/TFUtilsTest.cpp
@@ -45,9 +45,9 @@ TEST(TFUtilsTest, LoadAndExecuteTest) {
   static const std::vector<int64_t> Dim{1, KnownSize};
 
   EXPECT_TRUE(Evaluator.isValid());
-  Evaluator.initInput(0, TF_INT32, Dim);
+  Evaluator.initInput<int32_t>(0, Dim);
 
-  int32_t *V = static_cast<int32_t *>(TF_TensorData(Evaluator.getInput()[0]));
+  int32_t *V = Evaluator.getInput<int32_t>(0);
   // Fill it up with 1's, we know the output.
   for (auto I = 0; I < KnownSize; ++I) {
     V[I] = 1;
@@ -85,9 +85,9 @@ TEST(TFUtilsTest, EvalError) {
   static const std::vector<int64_t> Dim{1, KnownSize};
 
   EXPECT_TRUE(Evaluator.isValid());
-  Evaluator.initInput(0, TF_INT32, Dim);
+  Evaluator.initInput<int32_t>(0, Dim);
 
-  int32_t *V = static_cast<int32_t *>(TF_TensorData(Evaluator.getInput()[0]));
+  int32_t *V = Evaluator.getInput<int32_t>(0);
   // Fill it up with 1's, we know the output.
   for (auto I = 0; I < KnownSize; ++I) {
     V[I] = 1;

