[llvm] 55e2d20 - [MLGO] Use binary protobufs for improved training performance.

Mircea Trofin via llvm-commits llvm-commits at lists.llvm.org
Mon Jul 19 13:59:37 PDT 2021


Author: Mircea Trofin
Date: 2021-07-19T13:59:28-07:00
New Revision: 55e2d2060a367a293710f44fd61a03d797d4aade

URL: https://github.com/llvm/llvm-project/commit/55e2d2060a367a293710f44fd61a03d797d4aade
DIFF: https://github.com/llvm/llvm-project/commit/55e2d2060a367a293710f44fd61a03d797d4aade.diff

LOG: [MLGO] Use binary protobufs for improved training performance.

It turns out that during training, the time required to parse the
textual protobuf of a training log is about the same as the time it
takes to compile the module generating that log. Using binary protobufs
instead elides that cost almost completely.

Differential Revision: https://reviews.llvm.org/D106157
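
For context: after this change the logger builds a tensorflow::SequenceExample
in memory, and the only question is which protobuf wire format to emit. A
minimal sketch of that choice, mirroring the ProtobufTextMode switch added to
Logger::print below (the helper name is illustrative, not part of the patch):

    #include "google/protobuf/text_format.h"
    #include "tensorflow/core/example/example.pb.h"
    #include <string>

    // Illustrative helper, not part of TFUtils: serialize a
    // SequenceExample either as compact binary (the new default) or as
    // human-readable text (kept available behind -tfutils-text-log).
    std::string serializeLog(const tensorflow::SequenceExample &SE,
                             bool TextMode) {
      std::string OutStr;
      if (TextMode)
        google::protobuf::TextFormat::PrintToString(SE, &OutStr);
      else
        OutStr = SE.SerializeAsString();
      return OutStr;
    }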

Added: 
    

Modified: 
    llvm/CMakeLists.txt
    llvm/lib/Analysis/CMakeLists.txt
    llvm/lib/Analysis/TFUtils.cpp
    llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
    llvm/test/Transforms/Inline/ML/development-training-log.ll
    llvm/unittests/Analysis/TFUtilsTest.cpp

Removed: 
    


################################################################################
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index 5d3ad7a4fd582..8f4d89749004b 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -788,8 +788,26 @@ endif()
 set(TENSORFLOW_C_LIB_PATH "" CACHE PATH "Path to TensorFlow C library install")
 if (TENSORFLOW_C_LIB_PATH)
   find_library(tensorflow_c_api tensorflow PATHS ${TENSORFLOW_C_LIB_PATH}/lib NO_DEFAULT_PATH REQUIRED)
+  # Currently, the protobuf headers are distributed with the pip package that corresponds to the version
+  # of the C API library.
+  find_library(tensorflow_fx tensorflow_framework PATHS ${TENSORFLOW_C_LIB_PATH}/lib NO_DEFAULT_PATH REQUIRED)
   set(LLVM_HAVE_TF_API "ON" CACHE BOOL "Full Tensorflow API available")
   include_directories(${TENSORFLOW_C_LIB_PATH}/include)
+  if (NOT TF_PROTO_HEADERS)
+    message(STATUS "TF_PROTO_HEADERS not defined. Looking for tensorflow pip package.")
+    execute_process(COMMAND 
+      ${Python3_EXECUTABLE} "-m" "pip" "show" "tensorflow" 
+      OUTPUT_VARIABLE TF_PIP_OUT)
+    if ("${TF_PIP_OUT}" STREQUAL "")
+      message(FATAL_ERROR "Tensorflow pip package is also required for 'development' mode (protobuf headers)")
+    endif()
+    string(REGEX MATCH "Location: ([^\n]*\n)" TF_PIP_LOC "${TF_PIP_OUT}")
+    string(REPLACE "Location: " "" TF_PIP ${TF_PIP_LOC})
+    set(TF_PROTO_HEADERS ${TF_PIP}/include)
+  endif()
+  include_directories(${TF_PROTO_HEADERS})
+  add_definitions("-DGOOGLE_PROTOBUF_NO_RTTI")
+  add_definitions("-D_GLIBCXX_USE_CXX11_ABI=0")
 endif()
 
 # For up-to-date instructions for installing the Tensorflow dependency, refer to

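Note: if the location of the protobuf headers is already known, the pip
lookup above can be skipped by passing it at configure time, e.g.
-DTF_PROTO_HEADERS=/path/to/site-packages/tensorflow/include (the path is a
placeholder for wherever the matching tensorflow pip package is installed).
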
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
index 170cd2cbf7dbd..e0cd6b387f3d7 100644
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -27,7 +27,7 @@ if (DEFINED LLVM_HAVE_TF_AOT OR DEFINED LLVM_HAVE_TF_API)
   endif()
 
   if (DEFINED LLVM_HAVE_TF_API)
-    list(APPEND MLLinkDeps ${tensorflow_c_api})
+    list(APPEND MLLinkDeps ${tensorflow_c_api} ${tensorflow_fx})
   endif()
 endif()
 

diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp
index 701b654ba58a3..d09668528a693 100644
--- a/llvm/lib/Analysis/TFUtils.cpp
+++ b/llvm/lib/Analysis/TFUtils.cpp
@@ -15,6 +15,7 @@
 
 #include "llvm/ADT/Twine.h"
 #include "llvm/Analysis/Utils/TFUtils.h"
+#include "llvm/Support/CommandLine.h"
 #include "llvm/Support/Debug.h"
 #include "llvm/Support/JSON.h"
 #include "llvm/Support/ManagedStatic.h"
@@ -22,14 +23,19 @@
 #include "llvm/Support/Path.h"
 #include "llvm/Support/raw_ostream.h"
 
+#include "google/protobuf/text_format.h"
 #include "tensorflow/c/c_api.h"
 #include "tensorflow/c/c_api_experimental.h"
-
+#include "tensorflow/core/example/example.pb.h"
 #include <cassert>
 #include <numeric>
 
 using namespace llvm;
 
+static cl::opt<bool>
+    ProtobufTextMode("tfutils-text-log", cl::init(false), cl::Hidden,
+                     cl::desc("Output textual (human-readable) protobuf."));
+
 namespace {
 
 using TFGraphPtr = std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)>;
@@ -65,85 +71,53 @@ TFSessionOptionsPtr createTFSessionOptions() {
   return TFSessionOptionsPtr(TF_NewSessionOptions(), &TF_DeleteSessionOptions);
 }
 
-/// Write the values of one tensor as a list.
-template <typename T>
-void writeTensorValues(raw_ostream &OutFile, const char *TensorData,
-                       size_t ElemCount) {
-  OutFile << "[";
-  const T *TypedData = reinterpret_cast<const T *>(TensorData);
-  ListSeparator LS;
-  for (size_t I = 0; I < ElemCount; ++I)
-    OutFile << LS << TypedData[I];
-  OutFile << "]";
-}
-
 /// Write a list of tensors as a sequence of TensorFlow FeatureList protobufs.
 /// The tensors are assumed to be stored contiguously, in row-major format,
 /// in the TensorData buffer. Each tensor has the shape given by Spec. The
 /// feature name in the output is either the provided LoggingName, if
 /// specified, otherwise it's the name of the tensor (as given by Spec).
-void writeRawTensorsAsFeatureLists(raw_ostream &OutFile,
+void writeRawTensorsAsFeatureLists(tensorflow::FeatureLists *FE,
                                    const LoggedFeatureSpec &LoggedSpec,
                                    const char *TensorData, size_t TensorCount,
                                    bool FinalReward = false) {
-  const char *FieldName = "<invalid>";
-  std::function<void(const char *)> ValueWriter;
   const auto &Spec = LoggedSpec.Spec;
   // The 'Feature' protobuf only has 3 possible fields: float_list,
   // int64_list, or bytes_list, so we capture int32 values as int64. We don't
   // support any other types.
-  if (Spec.isElementType<int64_t>()) {
-    FieldName = "int64_list";
-    ValueWriter = [&](const char *Data) {
-      writeTensorValues<int64_t>(OutFile, Data, Spec.getElementCount());
-    };
-  } else if (Spec.isElementType<int32_t>()) {
-    FieldName = "int64_list";
-    ValueWriter = [&](const char *Data) {
-      writeTensorValues<int32_t>(OutFile, Data, Spec.getElementCount());
-    };
-
-  } else if (Spec.isElementType<float>()) {
-    FieldName = "float_list";
-    ValueWriter = [&](const char *Data) {
-      writeTensorValues<float>(OutFile, Data, Spec.getElementCount());
-    };
-
-  } else {
-    llvm_unreachable("Unsupported tensor type.");
-  }
-
-  OutFile << "  feature_list: {\n";
-  OutFile << "    key: "
-          << "\""
-          << (LoggedSpec.LoggingName ? *LoggedSpec.LoggingName : Spec.name())
-          << "\" ";
-  OutFile << "value: {\n";
-  size_t TensorByteSize = Spec.getElementCount() * Spec.getElementByteSize();
-
-  auto WriteFeatureProto = [&](const char *P) {
-    OutFile << "      feature: { " << FieldName << ": { value: ";
-    ValueWriter(P);
-    OutFile << " } }\n";
-  };
+  tensorflow::FeatureList &FL = (*FE->mutable_feature_list())[(
+      LoggedSpec.LoggingName ? *LoggedSpec.LoggingName : Spec.name())];
 
   const char *CurrentTensor = TensorData;
-  static int64_t Zero = 0;
-  // Write all but the last value. If this is the final reward, don't increment
-  // the CurrentTensor, and just write 0.
-  for (size_t I = 0; I < TensorCount - 1; ++I) {
-    if (FinalReward)
-      WriteFeatureProto(reinterpret_cast<const char *>(&Zero));
-    else {
-      WriteFeatureProto(CurrentTensor);
-      CurrentTensor += TensorByteSize;
+  const size_t TensorByteSize =
+      Spec.getElementCount() * Spec.getElementByteSize();
+  const size_t ElemCount = Spec.getElementCount();
+  for (size_t E = 0; E < TensorCount; ++E) {
+    const bool ShouldWrite = E + 1 == TensorCount || !FinalReward;
+
+    if (Spec.isElementType<int64_t>()) {
+      auto *MF = FL.add_feature()->mutable_int64_list()->mutable_value();
+      MF->Resize(ElemCount, 0);
+      if (ShouldWrite)
+        memcpy(MF->mutable_data(), CurrentTensor, TensorByteSize);
+    } else if (Spec.isElementType<int32_t>()) {
+      auto *MF = FL.add_feature()->mutable_int64_list()->mutable_value();
+      MF->Resize(ElemCount, 0);
+      if (ShouldWrite) {
+        const int32_t *TD = reinterpret_cast<const int32_t *>(CurrentTensor);
+        for (size_t I = 0; I < ElemCount; ++I)
+          (*MF)[I] = TD[I];
+      }
+    } else if (Spec.isElementType<float>()) {
+      auto *MF = FL.add_feature()->mutable_float_list()->mutable_value();
+      MF->Resize(ElemCount, 0.0);
+      if (ShouldWrite)
+        memcpy(MF->mutable_data(), CurrentTensor, TensorByteSize);
+    } else {
+      llvm_unreachable("Unsupported tensor type.");
     }
+    if (ShouldWrite)
+      CurrentTensor += TensorByteSize;
   }
-
-  WriteFeatureProto(CurrentTensor);
-
-  OutFile << "    }\n";
-  OutFile << "  }\n";
 }
 } // namespace
 
@@ -475,6 +449,8 @@ TFModelEvaluator::EvaluationResult::~EvaluationResult() {}
 TFModelEvaluator::~TFModelEvaluator() {}
 
 void Logger::print(raw_ostream &OS) {
+  tensorflow::SequenceExample SE;
+
   if (RawLogData.empty())
     return;
   if (RawLogData[0].empty())
@@ -488,16 +464,21 @@ void Logger::print(raw_ostream &OS) {
       RewardSpec.getElementCount() * RewardSpec.getElementByteSize();
   size_t NumberOfRewards = RawLogData.back().size() / RewardSize;
 
-  OS << "feature_lists: {\n";
+  tensorflow::FeatureLists *FE = SE.mutable_feature_lists();
   for (size_t I = 0; I < FeatureSpecs.size(); ++I)
-    writeRawTensorsAsFeatureLists(OS, FeatureSpecs[I], RawLogData[I].data(),
+    writeRawTensorsAsFeatureLists(FE, FeatureSpecs[I], RawLogData[I].data(),
                                   NumberOfRecords);
 
   if (IncludeReward)
-    writeRawTensorsAsFeatureLists(OS, {RewardSpec, None},
+    writeRawTensorsAsFeatureLists(FE, {RewardSpec, None},
                                   RawLogData.back().data(), NumberOfRecords,
                                   NumberOfRewards == 1);
-
-  OS << "}\n";
+  std::string OutStr;
+  if (ProtobufTextMode) {
+    google::protobuf::TextFormat::PrintToString(SE, &OutStr);
+  } else {
+    OutStr = SE.SerializeAsString();
+  }
+  OS << OutStr;
 }
 #endif // defined(LLVM_HAVE_TF_API)

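On the consumer side, a log emitted in the (new default) binary mode parses
back into the same message type. A hypothetical reader, with illustrative
names and minimal error handling:

    #include "tensorflow/core/example/example.pb.h"
    #include <string>

    // Illustrative helper, not part of the patch: parse a binary
    // training log and return how many records were logged for the
    // named feature (e.g. "inlining_decision"), or -1 on failure.
    int countRecords(const std::string &Buf, const std::string &Name) {
      tensorflow::SequenceExample SE;
      if (!SE.ParseFromString(Buf))
        return -1;
      const auto &Lists = SE.feature_lists().feature_list();
      const auto It = Lists.find(Name);
      return It == Lists.end() ? -1 : It->second.feature_size();
    }

When the final-reward path is used, writeRawTensorsAsFeatureLists zero-fills
every reward entry except the last (the ShouldWrite logic above), so a reader
should only trust the trailing reward value in that mode.
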
diff --git a/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll b/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
index 0b97118ee4281..a410a2a071aa0 100644
--- a/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
+++ b/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
@@ -7,40 +7,35 @@
 ; REQUIRES: have_tf_api
 ;
 ; Generate mock model
-; RUN: rm -rf %t && mkdir %t
+; RUN: rm -rf %t
 ; RUN: %python %S/../../../../lib/Analysis/models/generate_mock_model.py %S/../../../../lib/Analysis/models/inlining/config.py %t
 ;
 ; When the bounds are very wide ("no bounds"), all inlinings happen.
-; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=- -enable-ml-inliner=development -ml-advisor-size-increase-threshold=10.0 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=NOBOUNDS
+; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=- -tfutils-text-log -enable-ml-inliner=development -ml-advisor-size-increase-threshold=10.0 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=NOBOUNDS
 ;
 ; When the bounds are very restrictive, the first inlining happens but it's
 ; considered "bad" (since it trips over the bounds) and its reward is a
 ; penalty. However, the mandatory inlining, which is considered next, happens.
 ; No other inlinings happen.
-; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=- -enable-ml-inliner=development -ml-advisor-size-increase-threshold=1.0 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=BOUNDS
+; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=- -tfutils-text-log -enable-ml-inliner=development -ml-advisor-size-increase-threshold=1.0 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=BOUNDS
 ;
 ; With more restrictive bounds, the first inlining happens and is OK. The
 ; mandatory inlining happens next, and it trips over the bounds, which then
 ; forces no further inlinings.
-; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=- -enable-ml-inliner=development -ml-advisor-size-increase-threshold=1.1 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=RELAXED-BOUNDS
-
+; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=- -tfutils-text-log -enable-ml-inliner=development -ml-advisor-size-increase-threshold=1.1 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=RELAXED-BOUNDS
 target datalayout = "e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-grtev4-linux-gnu"
-
 declare i64 @f1()
-
 define i64 @may_not_be_inlined() {
   %r = call i64 @f1()
   %r2 = add i64 13, %r
   ret i64 %r2
 }
-
 define i64 @must_be_inlined() #0 {
   %r = call i64 @may_not_be_inlined()
   %r2 = add i64 13, %r
   ret i64 %r2
 }
-
 define i64 @top() {
   %r = call i64 @must_be_inlined()
   %r2 = call i64 @may_not_be_inlined()
@@ -49,15 +44,25 @@ define i64 @top() {
   %r5 = add i64 %r3, %r4
   ret i64 %r5
 }
-
 attributes #0 = { alwaysinline }
-; CHECK: key: "delta_size" value: {
-; NOBOUNDS-NEXT: feature: { int64_list: { value: [6] } }
-; RELAXED-BOUNDS-NEXT: feature: { int64_list: { value: [6] } }
-; NOBOUNDS-NEXT: feature: { int64_list: { value: [-11] } }
-; NOBOUNDS-NEXT: feature: { int64_list: { value: [4] } }
-; BOUNDS-NEXT: feature: { int64_list: { value: [2147483647] } }
-; CHECK-NEXT: }
+; CHECK:       key: "delta_size"
+; CHECK-NEXT:     value {
+; CHECK-NEXT:       feature {
+; CHECK-NEXT:         int64_list {
+; NOBOUNDS-NEXT:        value: 6
+; RELAXED-BOUNDS-NEXT:  value: 6
+; NOBOUNDS-NEXT:      }
+; NOBOUNDS-NEXT:    }
+; NOBOUNDS-NEXT:    feature {
+; NOBOUNDS-NEXT:      int64_list {
+; NOBOUNDS-NEXT:        value: -11
+; NOBOUNDS-NEXT:      }
+; NOBOUNDS-NEXT:    }
+; NOBOUNDS-NEXT:    feature {
+; NOBOUNDS-NEXT:      int64_list {
+; NOBOUNDS-NEXT:        value: 4
+; BOUNDS-NEXT:          value: 2147483647
+; CHECK-NEXT:         }
 ; CHECK-LABEL: @top
 ; must_be_inlined must always be inlined, so we won't find a call to it in @top()
 ; CHECK-NOT: call i64 @must_be_inlined

diff --git a/llvm/test/Transforms/Inline/ML/development-training-log.ll b/llvm/test/Transforms/Inline/ML/development-training-log.ll
index 422cb83c0b275..c571c02b3fd51 100644
--- a/llvm/test/Transforms/Inline/ML/development-training-log.ll
+++ b/llvm/test/Transforms/Inline/ML/development-training-log.ll
@@ -1,58 +1,56 @@
 ; Test that we can produce a log whether or not we have a model, in development mode.
 ; REQUIRES: have_tf_api
 ; Generate mock model
-; RUN: rm -rf %t && mkdir %t
+; RUN: rm -rf %t
 ; RUN: %python %S/../../../../lib/Analysis/models/generate_mock_model.py %S/../../../../lib/Analysis/models/inlining/config.py %t
 ;
-; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-model-under-training=%t -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s | FileCheck %s 
-; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-model-under-training=%t -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-output-spec-override=%S/Inputs/test_output_spec.json -S < %s | FileCheck %s --check-prefixes=EXTRA-OUTPUTS,CHECK
-; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s | FileCheck %s
-; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -ml-inliner-model-under-training=%t -S < %s | FileCheck %s --check-prefix=NOREWARD
-; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -S < %s | FileCheck %s --check-prefix=NOREWARD
-
+; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -tfutils-text-log -ml-inliner-model-under-training=%t -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s | FileCheck %s 
+; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -tfutils-text-log -ml-inliner-model-under-training=%t -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-output-spec-override=%S/Inputs/test_output_spec.json -S < %s | FileCheck %s --check-prefixes=EXTRA-OUTPUTS,CHECK
+; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -tfutils-text-log -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s | FileCheck %s
+; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -tfutils-text-log -ml-inliner-model-under-training=%t -S < %s | FileCheck %s --check-prefix=NOREWARD
+; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -tfutils-text-log -S < %s | FileCheck %s --check-prefix=NOREWARD
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-pc-linux-gnu"
-
 declare i32 @f1(i32)
 declare i32 @f2(i32)
-
 define dso_local i32 @branches(i32) {
   %cond = icmp slt i32 %0, 3
   br i1 %cond, label %then, label %else
-
 then:
   %ret.1 = call i32 @f1(i32 %0)
   br label %last.block
-
 else:
   %ret.2 = call i32 @f2(i32 %0)
   br label %last.block
-
 last.block:
   %ret = phi i32 [%ret.1, %then], [%ret.2, %else]
   ret i32 %ret
 }
-
 define dso_local i32 @top() {
   %1 = call i32 @branches(i32 2)
   ret i32 %1
 }
-
-
 !llvm.module.flags = !{!0}
 !llvm.ident = !{!1}
-
 !0 = !{i32 1, !"wchar_size", i32 4}
 !1 = !{!"clang version 7.0.0-6 (tags/RELEASE_700/final)"}
-
 ; Check we produce a protobuf that has inlining decisions and rewards.
+; CHECK:                  key: "delta_size"
+; CHECK-NEXT:               value {
+; CHECK-NEXT:                 feature {
+; CHECK-NEXT:                   int64_list {
+; CHECK-NEXT:                     value: 0
+; CHECK-NEXT:                   }
+; CHECK-NEXT:                 }
 ; CHECK-NOT: fake_extra_output
-; EXTRA-OUTPUTS:          key: "fake_extra_output" value: {
-; EXTRA-OUTPUTS-NEXT:       feature: { int64_list: { value: [{{[0-9]+}}] } }
-; CHECK:          key: "inlining_decision" value: {
-; CHECK-NEXT:       feature: { int64_list: { value: [1] } }
-; CHECK:          key: "delta_size" value: {
-; CHECK-NEXT:       feature: { int64_list: { value: [0] } }
-; CHECK-NEXT:     }
-; CHECK-NEXT:   }
-; NOREWARD-NOT: key: "delta_size" value: {
+; EXTRA-OUTPUTS:          key: "fake_extra_output"
+; EXTRA-OUTPUTS-NEXT:       value {
+; EXTRA-OUTPUTS-NEXT:         feature {
+; EXTRA-OUTPUTS-NEXT:           int64_list {
+; EXTRA-OUTPUTS-NEXT:             value: {{[0-9]+}}
+; CHECK:                  key: "inlining_decision"
+; CHECK-NEXT:               value {
+; CHECK-NEXT:                 feature {
+; CHECK-NEXT:                   int64_list {
+; CHECK-NEXT:                     value: 1
+; NOREWARD-NOT: key: "delta_size"

diff --git a/llvm/unittests/Analysis/TFUtilsTest.cpp b/llvm/unittests/Analysis/TFUtilsTest.cpp
index 1cd64f15e288a..a214663379085 100644
--- a/llvm/unittests/Analysis/TFUtilsTest.cpp
+++ b/llvm/unittests/Analysis/TFUtilsTest.cpp
@@ -7,6 +7,8 @@
 //===----------------------------------------------------------------------===//
 
 #include "llvm/Analysis/Utils/TFUtils.h"
+#include "tensorflow/core/example/example.pb.h"
+#include "tensorflow/core/example/feature.pb.h"
 #include "llvm/AsmParser/Parser.h"
 #include "llvm/IR/Dominators.h"
 #include "llvm/IR/Instructions.h"
@@ -143,6 +145,18 @@ TEST(TFUtilsTest, TensorSpecSizesAndTypes) {
   EXPECT_EQ(Spec1D.getElementByteSize(), sizeof(int16_t));
 }
 
+#define PROTO_CHECKER(FNAME, TYPE, INDEX, EXP)                                 \
+  do {                                                                         \
+    const auto &V = Expected.feature_lists()                                   \
+                        .feature_list()                                        \
+                        .at(FNAME)                                             \
+                        .feature(INDEX)                                        \
+                        .TYPE()                                                \
+                        .value();                                              \
+    for (auto I = 0; I < V.size(); ++I)                                        \
+      EXPECT_EQ(V.at(I), EXP[I]);                                              \
+  } while (false)
+
 TEST(TFUtilsTest, Logger) {
   std::vector<LoggedFeatureSpec> Features;
   Features.push_back(
@@ -152,42 +166,31 @@ TEST(TFUtilsTest, Logger) {
 
   auto Rewards = TensorSpec::createSpec<float>("reward", {1});
   Logger L(Features, Rewards, true);
-  float F00[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
-  int64_t F01[]{2, 3};
+  const float F00[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
+  const int64_t F01[]{2, 3};
 
   L.logTensorValue(0, F00, 6);
   L.logTensorValue(1, F01, 2);
   L.logReward<float>(3.4);
-  float F10[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
-  int64_t F11[]{-2, -3};
+  const float F10[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
+  const int64_t F11[]{-2, -3};
   L.logTensorValue(0, F10, 6);
   L.logTensorValue(1, F11, 2);
   L.logReward<float>(-3.0);
-  const auto *Expected = R"(feature_lists: {
-  feature_list: {
-    key: "the_float" value: {
-      feature: { float_list: { value: [0.000000e+00, 1.000000e-01, 2.000000e-01, 3.000000e-01, 4.000000e-01, 5.000000e-01] } }
-      feature: { float_list: { value: [0.000000e+00, 1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00] } }
-    }
-  }
-  feature_list: {
-    key: "alternate_name" value: {
-      feature: { int64_list: { value: [2, 3] } }
-      feature: { int64_list: { value: [-2, -3] } }
-    }
-  }
-  feature_list: {
-    key: "reward" value: {
-      feature: { float_list: { value: [3.400000e+00] } }
-      feature: { float_list: { value: [-3.000000e+00] } }
-    }
-  }
-}
-)";
   std::string Result;
   raw_string_ostream OS(Result);
   L.print(OS);
-  EXPECT_EQ(Result, Expected);
+
+  tensorflow::SequenceExample Expected;
+  EXPECT_TRUE(Expected.ParseFromString(Result));
+  PROTO_CHECKER("the_float", float_list, 0, F00);
+  PROTO_CHECKER("the_float", float_list, 1, F10);
+  PROTO_CHECKER("alternate_name", int64_list, 0, F01);
+  PROTO_CHECKER("alternate_name", int64_list, 1, F11);
+  float R0[]{3.4};
+  float R1[]{-3.0};
+  PROTO_CHECKER("reward", float_list, 0, R0);
+  PROTO_CHECKER("reward", float_list, 1, R1);
 }
 
 TEST(TFUtilsTest, LoggerNoReward) {
@@ -199,34 +202,25 @@ TEST(TFUtilsTest, LoggerNoReward) {
 
   auto Rewards = TensorSpec::createSpec<float>("reward", {1});
   Logger L(Features, Rewards, false);
-  float F00[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
-  int64_t F01[]{2, 3};
+  const float F00[]{0.0, 0.1, 0.2, 0.3, 0.4, 0.5};
+  const int64_t F01[]{2, 3};
 
   L.logTensorValue(0, F00, 6);
   L.logTensorValue(1, F01, 2);
-  float F10[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
-  int64_t F11[]{-2, -3};
+  const float F10[]{0.0, 1.0, 2.0, 3.0, 4.0, 5.0};
+  const int64_t F11[]{-2, -3};
   L.logTensorValue(0, F10, 6);
   L.logTensorValue(1, F11, 2);
-  const auto *Expected = R"(feature_lists: {
-  feature_list: {
-    key: "the_float" value: {
-      feature: { float_list: { value: [0.000000e+00, 1.000000e-01, 2.000000e-01, 3.000000e-01, 4.000000e-01, 5.000000e-01] } }
-      feature: { float_list: { value: [0.000000e+00, 1.000000e+00, 2.000000e+00, 3.000000e+00, 4.000000e+00, 5.000000e+00] } }
-    }
-  }
-  feature_list: {
-    key: "alternate_name" value: {
-      feature: { int64_list: { value: [2, 3] } }
-      feature: { int64_list: { value: [-2, -3] } }
-    }
-  }
-}
-)";
+
   std::string Result;
   raw_string_ostream OS(Result);
   L.print(OS);
-  EXPECT_EQ(Result, Expected);
+  tensorflow::SequenceExample Expected;
+  EXPECT_TRUE(Expected.ParseFromString(Result));
+  PROTO_CHECKER("the_float", float_list, 0, F00);
+  PROTO_CHECKER("the_float", float_list, 1, F10);
+  PROTO_CHECKER("alternate_name", int64_list, 0, F01);
+  PROTO_CHECKER("alternate_name", int64_list, 1, F11);
 }
 
 TEST(TFUtilsTest, LoggerFinalReward) {
@@ -242,32 +236,14 @@ TEST(TFUtilsTest, LoggerFinalReward) {
     L.logTensorValue(1, &I);
   }
   L.logFinalReward<float>(3.14);
-  const auto *Expected = R"(feature_lists: {
-  feature_list: {
-    key: "the_float" value: {
-      feature: { float_list: { value: [0.000000e+00] } }
-      feature: { float_list: { value: [1.000000e+00] } }
-      feature: { float_list: { value: [2.000000e+00] } }
-    }
-  }
-  feature_list: {
-    key: "the_int" value: {
-      feature: { int64_list: { value: [0] } }
-      feature: { int64_list: { value: [1] } }
-      feature: { int64_list: { value: [2] } }
-    }
-  }
-  feature_list: {
-    key: "reward" value: {
-      feature: { float_list: { value: [0.000000e+00] } }
-      feature: { float_list: { value: [0.000000e+00] } }
-      feature: { float_list: { value: [3.140000e+00] } }
-    }
-  }
-}
-)";
   std::string Result;
   raw_string_ostream OS(Result);
   L.print(OS);
-  EXPECT_EQ(Result, Expected);
+  const float Zero[]{0.0};
+  const float R[]{3.14};
+  tensorflow::SequenceExample Expected;
+  EXPECT_TRUE(Expected.ParseFromString(Result));
+  PROTO_CHECKER("reward", float_list, 0, Zero);
+  PROTO_CHECKER("reward", float_list, 1, Zero);
+  PROTO_CHECKER("reward", float_list, 2, R);
 }