<div dir="auto">Could you paste the cmake command line and the build command line, for repro? Thanks!</div><br><div class="gmail_quote"><div dir="ltr" class="gmail_attr">On Tue, Jul 14, 2020, 06:21 Florian Hahn <<a href="mailto:florian_hahn@apple.com">florian_hahn@apple.com</a>> wrote:<br></div><blockquote class="gmail_quote" style="margin:0 0 0 .8ex;border-left:1px #ccc solid;padding-left:1ex">Hi,<br>
<br>
<br>
The patch below seems to cause a build failure on macOS due to some undefined symbols (see below).<br>
<br>
It would be great if you could take a look.<br>
<br>
Cheers,<br>
Florian<br>
<br>
Undefined symbols for architecture x86_64:<br>
"llvm::InlineSizeEstimatorAnalysis::Key", referenced from:<br>
llvm::PassBuilder::registerFunctionAnalyses(llvm::AnalysisManager<llvm::Function>&) in libLLVMPasses.a(PassBuilder.cpp.o)<br>
llvm::detail::AnalysisResultModel<llvm::Function, llvm::InlineSizeEstimatorAnalysis, llvm::Optional<unsigned long>, llvm::PreservedAnalyses, llvm::AnalysisManager<llvm::Function>::Invalidator, false>::invalidate(llvm::Function&, llvm::PreservedAnalyses const&, llvm::AnalysisManager<llvm::Function>::Invalidator&) in libLLVMPasses.a(PassBuilder.cpp.o)<br>
llvm::RequireAnalysisPass<llvm::InlineSizeEstimatorAnalysis, llvm::Function, llvm::AnalysisManager<llvm::Function> >::run(llvm::Function&, llvm::AnalysisManager<llvm::Function>&) in libLLVMPasses.a(PassBuilder.cpp.o)<br>
llvm::detail::PassModel<llvm::Function, llvm::InvalidateAnalysisPass<llvm::InlineSizeEstimatorAnalysis>, llvm::PreservedAnalyses, llvm::AnalysisManager<llvm::Function> >::run(llvm::Function&, llvm::AnalysisManager<llvm::Function>&) in libLLVMPasses.a(PassBuilder.cpp.o)<br>
"llvm::InlineSizeEstimatorAnalysis::run(llvm::Function const&, llvm::AnalysisManager<llvm::Function>&)", referenced from:<br>
llvm::detail::AnalysisPassModel<llvm::Function, llvm::InlineSizeEstimatorAnalysis, llvm::PreservedAnalyses, llvm::AnalysisManager<llvm::Function>::Invalidator>::run(llvm::Function&, llvm::AnalysisManager<llvm::Function>&) in libLLVMPasses.a(PassBuilder.cpp.o)<br>
"llvm::InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis(llvm::InlineSizeEstimatorAnalysis&&)", referenced from:<br>
llvm::PassBuilder::registerFunctionAnalyses(llvm::AnalysisManager<llvm::Function>&) in libLLVMPasses.a(PassBuilder.cpp.o)<br>
"llvm::InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis()", referenced from:<br>
llvm::PassBuilder::registerFunctionAnalyses(llvm::AnalysisManager<llvm::Function>&) in libLLVMPasses.a(PassBuilder.cpp.o)<br>
"llvm::InlineSizeEstimatorAnalysis::~InlineSizeEstimatorAnalysis()", referenced from:<br>
llvm::PassBuilder::registerFunctionAnalyses(llvm::AnalysisManager<llvm::Function>&) in libLLVMPasses.a(PassBuilder.cpp.o)<br>
llvm::detail::AnalysisPassModel<llvm::Function, llvm::InlineSizeEstimatorAnalysis, llvm::PreservedAnalyses, llvm::AnalysisManager<llvm::Function>::Invalidator>::~AnalysisPassModel() in libLLVMPasses.a(PassBuilder.cpp.o)<br>
llvm::detail::AnalysisPassModel<llvm::Function, llvm::InlineSizeEstimatorAnalysis, llvm::PreservedAnalyses, llvm::AnalysisManager<llvm::Function>::Invalidator>::~AnalysisPassModel() in libLLVMPasses.a(PassBuilder.cpp.o)<br>
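<br>
(For context, all of these symbols are declared in the new InlineSizeEstimatorAnalysis.h added by the patch, e.g.:<br>
<br>
class InlineSizeEstimatorAnalysis<br>
    : public AnalysisInfoMixin<InlineSizeEstimatorAnalysis> {<br>
public:<br>
  InlineSizeEstimatorAnalysis();<br>
  InlineSizeEstimatorAnalysis(InlineSizeEstimatorAnalysis &&);<br>
  ~InlineSizeEstimatorAnalysis();<br>
  static AnalysisKey Key;<br>
  using Result = Optional<size_t>;<br>
  Result run(const Function &F, FunctionAnalysisManager &FAM);<br>
  ...<br>
};<br>
<br>
and their definitions live in the new InlineSizeEstimatorAnalysis.cpp, which is added to LLVMAnalysis.)<br>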
<br>
> On Jul 14, 2020, at 00:26, Mircea Trofin via llvm-commits <<a href="mailto:llvm-commits@lists.llvm.org" target="_blank" rel="noreferrer">llvm-commits@lists.llvm.org</a>> wrote:<br>
> <br>
> <br>
> Author: Mircea Trofin<br>
> Date: 2020-07-13T16:26:26-07:00<br>
> New Revision: caf395ee8c28028d5af0f1455cd5ef134432124c<br>
> <br>
> URL: <a href="https://github.com/llvm/llvm-project/commit/caf395ee8c28028d5af0f1455cd5ef134432124c" rel="noreferrer noreferrer" target="_blank">https://github.com/llvm/llvm-project/commit/caf395ee8c28028d5af0f1455cd5ef134432124c</a><br>
> DIFF: <a href="https://github.com/llvm/llvm-project/commit/caf395ee8c28028d5af0f1455cd5ef134432124c.diff" rel="noreferrer noreferrer" target="_blank">https://github.com/llvm/llvm-project/commit/caf395ee8c28028d5af0f1455cd5ef134432124c.diff</a><br>
> <br>
> LOG: Reapply "[llvm] Native size estimator for training -Oz inliner"<br>
> <br>
> This reverts commit 9908a3b9f521c954cbf6adcec35b14b2f6c8da49.<br>
> <br>
> The fix was to exclude the content of TFUtils.h (which is automatically<br>
> included in the LLVM_Analysis module when LLVM_ENABLE_MODULES is enabled).<br>
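<br>
(Concretely, the new TFUtils.h — see its hunk further down — wraps all of its content in the configuration guard<br>
<br>
#ifdef LLVM_HAVE_TF_API<br>
#include "tensorflow/c/c_api.h"<br>
// ... all TFModelEvaluator declarations ...<br>
#endif // LLVM_HAVE_TF_API<br>
<br>
so a build that enables LLVM_ENABLE_MODULES but does not configure the TF C API no longer pulls the TensorFlow headers into the LLVM_Analysis module.)<br>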
> <br>
> Differential Revision: <a href="https://reviews.llvm.org/D82817" rel="noreferrer noreferrer" target="_blank">https://reviews.llvm.org/D82817</a><br>
> <br>
> Added: <br>
> llvm/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h<br>
> llvm/include/llvm/Analysis/Utils/TFUtils.h<br>
> llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp<br>
> llvm/lib/Analysis/TFUtils.cpp<br>
> llvm/unittests/Analysis/InlineSizeEstimatorAnalysisTest.cpp<br>
> llvm/unittests/Analysis/Inputs/ir2native_x86_64_model/saved_model.pbtxt<br>
> llvm/unittests/Analysis/Inputs/ir2native_x86_64_model/variables/variables.data-00000-of-00001<br>
> llvm/unittests/Analysis/Inputs/ir2native_x86_64_model/variables/variables.index<br>
> llvm/unittests/Analysis/TFUtilsTest.cpp<br>
> <br>
> Modified: <br>
> llvm/CMakeLists.txt<br>
> llvm/lib/Analysis/CMakeLists.txt<br>
> llvm/lib/Passes/PassBuilder.cpp<br>
> llvm/lib/Passes/PassRegistry.def<br>
> llvm/unittests/Analysis/CMakeLists.txt<br>
> <br>
> Removed: <br>
> <br>
> <br>
> <br>
> ################################################################################<br>
> diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt<br>
> index de2887b64c2a..4e14e61fcacd 100644<br>
> --- a/llvm/CMakeLists.txt<br>
> +++ b/llvm/CMakeLists.txt<br>
> @@ -981,6 +981,18 @@ if (NOT TENSORFLOW_AOT_PATH STREQUAL "")<br>
> ${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/tf_runtime)<br>
> endif()<br>
> <br>
> +set(TENSORFLOW_C_LIB_PATH "" CACHE PATH "Path to TensorFlow C library install")<br>
> +find_library(tensorflow_c_api tensorflow PATHS ${TENSORFLOW_C_LIB_PATH}/lib)<br>
> +<br>
> +# Similar to the above Tensorflow dependency, please refer to the same script.<br>
> +# In this case, the latest C API library is available for download from<br>
> +# <a href="https://www.tensorflow.org/install/lang_c" rel="noreferrer noreferrer" target="_blank">https://www.tensorflow.org/install/lang_c</a><br>
> +if (tensorflow_c_api)<br>
> + set(LLVM_HAVE_TF_API "ON" CACHE BOOL "Full Tensorflow API available")<br>
> + add_definitions("-DLLVM_HAVE_TF_API")<br>
> + include_directories(${TENSORFLOW_C_LIB_PATH}/include)<br>
> +endif()<br>
> +<br>
> # Put this before tblgen. Else we have a circular dependence.<br>
> add_subdirectory(lib/Demangle)<br>
> add_subdirectory(lib/Support)<br>
> <br>
> diff --git a/llvm/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h b/llvm/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h<br>
> new file mode 100644<br>
> index 000000000000..29a6f5914674<br>
> --- /dev/null<br>
> +++ b/llvm/include/llvm/Analysis/InlineSizeEstimatorAnalysis.h<br>
> @@ -0,0 +1,35 @@<br>
> +//===- InlineSizeEstimatorAnalysis.h - ML size estimator --------*- C++ -*-===//<br>
> +//<br>
> +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.<br>
> +// See <a href="https://llvm.org/LICENSE.txt" rel="noreferrer noreferrer" target="_blank">https://llvm.org/LICENSE.txt</a> for license information.<br>
> +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception<br>
> +//<br>
> +//===----------------------------------------------------------------------===//<br>
> +//<br>
> +<br>
> +#ifndef LLVM_ANALYSIS_INLINESIZEESTIMATORANALYSIS_H<br>
> +#define LLVM_ANALYSIS_INLINESIZEESTIMATORANALYSIS_H<br>
> +<br>
> +#include "llvm/IR/PassManager.h"<br>
> +<br>
> +namespace llvm {<br>
> +class Function;<br>
> +<br>
> +class TFModelEvaluator;<br>
> +class InlineSizeEstimatorAnalysis<br>
> + : public AnalysisInfoMixin<InlineSizeEstimatorAnalysis> {<br>
> +public:<br>
> + InlineSizeEstimatorAnalysis();<br>
> + InlineSizeEstimatorAnalysis(InlineSizeEstimatorAnalysis &&);<br>
> + ~InlineSizeEstimatorAnalysis();<br>
> +<br>
> + static AnalysisKey Key;<br>
> + using Result = Optional<size_t>;<br>
> + Result run(const Function &F, FunctionAnalysisManager &FAM);<br>
> + static bool isEvaluatorRequested();<br>
> +<br>
> +private:<br>
> + std::unique_ptr<TFModelEvaluator> Evaluator;<br>
> +};<br>
> +} // namespace llvm<br>
> +#endif // LLVM_ANALYSIS_INLINESIZEESTIMATORANALYSIS_H<br>
> \ No newline at end of file<br>
> <br>
> diff --git a/llvm/include/llvm/Analysis/Utils/TFUtils.h b/llvm/include/llvm/Analysis/Utils/TFUtils.h<br>
> new file mode 100644<br>
> index 000000000000..b7de199753a6<br>
> --- /dev/null<br>
> +++ b/llvm/include/llvm/Analysis/Utils/TFUtils.h<br>
> @@ -0,0 +1,138 @@<br>
> +//===- TFUtils.h - utilities for tensorflow C API ---------------*- C++ -*-===//<br>
> +//<br>
> +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.<br>
> +// See <a href="https://llvm.org/LICENSE.txt" rel="noreferrer noreferrer" target="_blank">https://llvm.org/LICENSE.txt</a> for license information.<br>
> +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception<br>
> +//<br>
> +//===----------------------------------------------------------------------===//<br>
> +//<br>
> +#ifndef LLVM_ANALYSIS_UTILS_TFUTILS_H<br>
> +#define LLVM_ANALYSIS_UTILS_TFUTILS_H<br>
> +<br>
> +#ifdef LLVM_HAVE_TF_API<br>
> +#include "tensorflow/c/c_api.h"<br>
> +#include "llvm/IR/LLVMContext.h"<br>
> +<br>
> +#include <memory><br>
> +#include <vector><br>
> +<br>
> +namespace llvm {<br>
> +<br>
> +/// Load a SavedModel, find the given inputs and outputs, and setup storage<br>
> +/// for input tensors. The user is responsible for correctly dimensioning the<br>
> +/// input tensors and setting their values before calling evaluate().<br>
> +/// To initialize:<br>
> +/// - construct the object<br>
> +/// - initialize the input tensors using initInput. Indices must correspond to<br>
> +/// indices in the InputNames used at construction.<br>
> +/// To use:<br>
> +/// - set input values by using getInput to get each input tensor, and then<br>
> +/// setting internal scalars, for all dimensions (tensors are row-major:<br>
> +/// <a href="https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/c/c_api.h#L205" rel="noreferrer noreferrer" target="_blank">https://github.com/tensorflow/tensorflow/blob/r1.5/tensorflow/c/c_api.h#L205</a>)<br>
> +/// - prepare an output vector of TF_Output* type, with the correct number of<br>
> +/// outputs (i.e. same as OutputNames). Initialize the vector with nullptr<br>
> +/// values.<br>
> +/// - call evaluate. The input tensors' values are not consumed after this, and<br>
> +/// may still be read.<br>
> +/// - use the outputs in the output vector<br>
> +/// - deallocate each output tensor in the output vector, using TF_DeleteTensor.<br>
> +class TFModelEvaluator final {<br>
> +public:<br>
> + /// The result of a model evaluation. Handles the lifetime of the output<br>
> + /// TF_Tensor objects, which means that their values need to be used before<br>
> + /// the EvaluationResult's dtor is called.<br>
> + class EvaluationResult {<br>
> + public:<br>
> + ~EvaluationResult() {<br>
> + for (auto *P : Output)<br>
> + if (P)<br>
> + TF_DeleteTensor(P);<br>
> + }<br>
> +<br>
> + EvaluationResult(const EvaluationResult &) = delete;<br>
> + EvaluationResult(EvaluationResult &&Other)<br>
> + : OutputSize(Other.OutputSize), Output(std::move(Other.Output)) {<br>
> + Other.Output.clear();<br>
> + };<br>
> +<br>
> + /// Get a pointer to the first element of the tensor at Index.<br>
> + template <typename T> T *getTensorValue(size_t Index) {<br>
> + return static_cast<T *>(TF_TensorData(Output[Index]));<br>
> + }<br>
> +<br>
> + private:<br>
> + friend class TFModelEvaluator;<br>
> + EvaluationResult(size_t OutputSize)<br>
> + : OutputSize(OutputSize), Output(OutputSize){};<br>
> +<br>
> + const size_t OutputSize;<br>
> + std::vector<TF_Tensor *> Output;<br>
> + };<br>
> +<br>
> + using TFGraphPtr = std::unique_ptr<TF_Graph, decltype(&TF_DeleteGraph)>;<br>
> + using TFSessionOptionsPtr =<br>
> + std::unique_ptr<TF_SessionOptions, decltype(&TF_DeleteSessionOptions)>;<br>
> + using TFStatusPtr = std::unique_ptr<TF_Status, decltype(&TF_DeleteStatus)>;<br>
> +<br>
> + TFModelEvaluator(StringRef SavedModelPath,<br>
> + const std::vector<std::string> &InputNames,<br>
> + const std::vector<std::string> &OutputNames,<br>
> + const char *Tags = "serve");<br>
> + ~TFModelEvaluator();<br>
> + TFModelEvaluator(const TFModelEvaluator &) = delete;<br>
> + TFModelEvaluator(TFModelEvaluator &&) = delete;<br>
> +<br>
> + /// Evaluate the model, assuming it is valid. Returns None if the evaluation<br>
> + /// fails or the model is invalid, or an EvaluationResult otherwise. The<br>
> + /// inputs are assumed to have been already provided via getInput(). When<br>
> + /// returning None, it also marks the object invalid. Pass an Output vector<br>
> + /// with the same size as OutputNames, but with nullptr values. evaluate()<br>
> + /// will populate it with tensors, matching in index the corresponding<br>
> + /// OutputNames. The caller is responsible for the deallocation of those<br>
> + /// tensors, using TF_DeleteTensor.<br>
> + Optional<EvaluationResult> evaluate();<br>
> +<br>
> + /// Provides access to the input vector. It is already dimensioned correctly,<br>
> + /// but the values need to be allocated by the user.<br>
> + std::vector<TF_Tensor *> &getInput() { return Input; }<br>
> +<br>
> + /// Returns true if the tensorflow model was loaded successfully, false<br>
> + /// otherwise.<br>
> + bool isValid() const { return !!Session; }<br>
> +<br>
> + /// Initialize the input at Index as a tensor of the given type and dimensions<br>
> + void initInput(int Index, TF_DataType Type,<br>
> + const std::vector<int64_t> &Dimensions);<br>
> +<br>
> +private:<br>
> + /// The objects necessary for carrying out an evaluation of the SavedModel.<br>
> + /// They are expensive to set up, and we maintain them across all the<br>
> + /// evaluations of the model.<br>
> + TF_Session *Session = nullptr;<br>
> + TFGraphPtr Graph;<br>
> + TFSessionOptionsPtr Options;<br>
> +<br>
> + /// The specification of the input nodes.<br>
> + std::vector<TF_Output> InputFeed;<br>
> +<br>
> + /// The input tensors. They must match by index of the corresponding InputFeed<br>
> + /// value. We set up the tensors once and just mutate their scalars before<br>
> + /// each evaluation. The input tensors keep their value after an evaluation.<br>
> + std::vector<TF_Tensor *> Input;<br>
> +<br>
> + /// The specification of the output nodes. When evaluating, the tensors in the<br>
> + /// output tensor vector must match by index the corresponding element in the<br>
> + /// OutputFeed.<br>
> + std::vector<TF_Output> OutputFeed;<br>
> +<br>
> + /// Reusable utility for deleting the session.<br>
> + void deleteSession();<br>
> +<br>
> + /// Reusable utility for ensuring we can bind the requested Name to a node in<br>
> + /// the SavedModel Graph.<br>
> + bool checkReportAndReset(const TF_Output &Output, StringRef Name);<br>
> +};<br>
> +} // namespace llvm<br>
> +<br>
> +#endif // LLVM_HAVE_TF_API<br>
> +#endif // LLVM_ANALYSIS_UTILS_TFUTILS_H<br>
> <br>
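A minimal usage sketch of the evaluator above, following its class comment (it only builds when LLVM_HAVE_TF_API is defined; the input/output names below follow the ir2native model added by this patch, and NumFeatures is just a placeholder):<br>
<br>
#include "llvm/ADT/Optional.h"<br>
#include "llvm/Analysis/Utils/TFUtils.h"<br>
#include <cstdint><br>
<br>
// Placeholder; must match the input shape expected by the SavedModel.<br>
constexpr int64_t NumFeatures = 4;<br>
<br>
llvm::Optional<float> scoreOneRow(llvm::StringRef SavedModelDir) {<br>
  llvm::TFModelEvaluator Evaluator(SavedModelDir, {"serving_default_input_1"},<br>
                                   {"StatefulPartitionedCall"});<br>
  if (!Evaluator.isValid())   // the model failed to load<br>
    return llvm::None;<br>
  // One row of int32 features; dtype and dimensions must match the model.<br>
  Evaluator.initInput(0, TF_INT32, {1, NumFeatures});<br>
  int32_t *Buf = static_cast<int32_t *>(TF_TensorData(Evaluator.getInput()[0]));<br>
  for (int64_t I = 0; I < NumFeatures; ++I)<br>
    Buf[I] = 0;<br>
  auto ER = Evaluator.evaluate();   // None on failure, output tensors otherwise<br>
  if (!ER)<br>
    return llvm::None;<br>
  return *ER->getTensorValue<float>(0);<br>
}<br>
<br>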
> diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt<br>
> index a317579ecc83..703623396d96 100644<br>
> --- a/llvm/lib/Analysis/CMakeLists.txt<br>
> +++ b/llvm/lib/Analysis/CMakeLists.txt<br>
> @@ -1,17 +1,35 @@<br>
> set(CommonMLSources MLInlineAdvisor.cpp)<br>
> set(ReleaseModeMLSources ReleaseModeModelRunner.cpp)<br>
> +set(DevelopmentModeMLSources TFUtils.cpp)<br>
> <br>
> -if (DEFINED LLVM_HAVE_TF_AOT)<br>
> - include(TensorFlowCompile)<br>
> - tfcompile(models/inliner serve action InlinerSizeModel llvm::InlinerSizeModel)<br>
> - list(APPEND ReleaseModeMLSources<br>
> - $<TARGET_OBJECTS:tf_xla_runtime_objects><br>
> - ${GENERATED_OBJS}<br>
> - )<br>
> - set(MLPolicySources ${CommonMLSources} ${ReleaseModeMLSources})<br>
> +if (DEFINED LLVM_HAVE_TF_AOT OR DEFINED LLVM_HAVE_TF_API)<br>
> + set(MLPolicySources ${CommonMLSources})<br>
> + if (DEFINED LLVM_HAVE_TF_AOT)<br>
> + include(TensorFlowCompile)<br>
> + tfcompile(models/inliner serve action InlinerSizeModel llvm::InlinerSizeModel)<br>
> + list(APPEND ReleaseModeMLSources<br>
> + $<TARGET_OBJECTS:tf_xla_runtime_objects><br>
> + ${GENERATED_OBJS}<br>
> + )<br>
> + LIST(APPEND MLPolicySources ${ReleaseModeMLSources})<br>
> + else()<br>
> + LIST(APPEND LLVM_OPTIONAL_SOURCES ${ReleaseModeMLSources})<br>
> + endif()<br>
> +<br>
> + if (DEFINED LLVM_HAVE_TF_API)<br>
> + LIST(APPEND MLPolicySources ${DevelopmentModeMLSources})<br>
> + LIST(APPEND MLLinkDeps ${tensorflow_c_api})<br>
> + else()<br>
> + LIST(APPEND LLVM_OPTIONAL_SOURCES ${DevelopmentModeMLSources})<br>
> + endif()<br>
> else()<br>
> - set(LLVM_OPTIONAL_SOURCES ${CommonMLSources} ${ReleaseModeMLSources})<br>
> + LIST(APPEND LLVM_OPTIONAL_SOURCES <br>
> + ${CommonMLSources}<br>
> + ${DevelopmentModeMLSources}<br>
> + ${ReleaseModeMLSources}<br>
> + )<br>
> endif()<br>
> + <br>
> <br>
> add_llvm_component_library(LLVMAnalysis<br>
> AliasAnalysis.cpp<br>
> @@ -57,6 +75,7 @@ add_llvm_component_library(LLVMAnalysis<br>
> InlineCost.cpp<br>
> InlineAdvisor.cpp<br>
> InlineFeaturesAnalysis.cpp<br>
> + InlineSizeEstimatorAnalysis.cpp<br>
> InstCount.cpp<br>
> InstructionPrecedenceTracking.cpp<br>
> InstructionSimplify.cpp<br>
> @@ -124,4 +143,7 @@ add_llvm_component_library(LLVMAnalysis<br>
> <br>
> DEPENDS<br>
> intrinsics_gen<br>
> +<br>
> + LINK_LIBS<br>
> + ${MLLinkDeps}<br>
> )<br>
> <br>
> diff --git a/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp b/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp<br>
> new file mode 100644<br>
> index 000000000000..1d1952ae6cbb<br>
> --- /dev/null<br>
> +++ b/llvm/lib/Analysis/InlineSizeEstimatorAnalysis.cpp<br>
> @@ -0,0 +1,299 @@<br>
> +//===- InlineSizeEstimatorAnalysis.cpp - IR to native size from ML model --===//<br>
> +//<br>
> +// The LLVM Compiler Infrastructure<br>
> +//<br>
> +// This file is distributed under the University of Illinois Open Source<br>
> +// License. See LICENSE.TXT for details.<br>
> +//<br>
> +//===----------------------------------------------------------------------===//<br>
> +//<br>
> +// This implements feature and label extraction for offline supervised learning<br>
> +// of a IR to native size model.<br>
> +//<br>
> +//===----------------------------------------------------------------------===//<br>
> +#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"<br>
> +<br>
> +#ifdef LLVM_HAVE_TF_API<br>
> +#include "llvm/Analysis/Utils/TFUtils.h"<br>
> +#endif<br>
> +#include "llvm/Analysis/LoopInfo.h"<br>
> +#include "llvm/Analysis/TargetLibraryInfo.h"<br>
> +#include "llvm/Analysis/TargetTransformInfo.h"<br>
> +#include "llvm/IR/BasicBlock.h"<br>
> +#include "llvm/IR/Dominators.h"<br>
> +#include "llvm/IR/Function.h"<br>
> +#include "llvm/IR/Instructions.h"<br>
> +#include "llvm/IR/PassManager.h"<br>
> +#include "llvm/MC/MCAsmLayout.h"<br>
> +#include "llvm/Support/Casting.h"<br>
> +#include "llvm/Support/CommandLine.h"<br>
> +#include "llvm/Support/raw_ostream.h"<br>
> +<br>
> +#include <algorithm><br>
> +#include <deque><br>
> +<br>
> +using namespace llvm;<br>
> +<br>
> +AnalysisKey InlineSizeEstimatorAnalysis::Key;<br>
> +<br>
> +#define DEBUG_TYPE "inline-size-estimator"<br>
> +<br>
> +#ifdef LLVM_HAVE_TF_API<br>
> +cl::opt<std::string> TFIR2NativeModelPath(<br>
> + "ml-inliner-ir2native-model", cl::Hidden,<br>
> + cl::desc("Path to saved model evaluating native size from IR."));<br>
> +<br>
> +namespace {<br>
> +unsigned getMaxInstructionID() {<br>
> +#define LAST_OTHER_INST(NR) return NR;<br>
> +#include "llvm/IR/Instruction.def"<br>
> +}<br>
> +<br>
> +class IRToNativeSizeLearning {<br>
> +public:<br>
> + enum class NamedFeatureIndex : size_t {<br>
> + InitialSize,<br>
> + Blocks,<br>
> + Calls,<br>
> + IsLocal,<br>
> + IsLinkOnceODR,<br>
> + IsLinkOnce,<br>
> + Loops,<br>
> + MaxLoopDepth,<br>
> + MaxDomTreeLevel,<br>
> +<br>
> + NumNamedFeatures<br>
> + };<br>
> + static const size_t NumNamedFeatures =<br>
> + static_cast<size_t>(NamedFeatureIndex::NumNamedFeatures);<br>
> + struct FunctionFeatures {<br>
> + static std::vector<std::pair<size_t, size_t>><br>
> + ImportantInstructionSuccessions;<br>
> + static const size_t FeatureCount;<br>
> +<br>
> + std::array<int32_t, NumNamedFeatures> NamedFeatures = {0};<br>
> + std::vector<int32_t> InstructionHistogram;<br>
> + std::vector<int32_t> InstructionPairHistogram;<br>
> +<br>
> + void fillTensor(int32_t *Ptr) const;<br>
> + int32_t &operator[](NamedFeatureIndex Pos) {<br>
> + return NamedFeatures[static_cast<size_t>(Pos)];<br>
> + }<br>
> + };<br>
> + IRToNativeSizeLearning() = default;<br>
> +<br>
> + static FunctionFeatures getFunctionFeatures(Function &F,<br>
> + FunctionAnalysisManager &FAM);<br>
> +<br>
> +private:<br>
> + /// Sort once the feature tuples.<br>
> + struct SortFeatureTuples {<br>
> + bool IsSorted = false;<br>
> + SortFeatureTuples() {<br>
> + std::sort(FunctionFeatures::ImportantInstructionSuccessions.begin(),<br>
> + FunctionFeatures::ImportantInstructionSuccessions.end());<br>
> + IsSorted = true;<br>
> + }<br>
> + };<br>
> +<br>
> + static llvm::ManagedStatic<SortFeatureTuples> TupleSorter;<br>
> +<br>
> + static bool ensureSortedTuples() { return TupleSorter->IsSorted; }<br>
> +};<br>
> +llvm::ManagedStatic<IRToNativeSizeLearning::SortFeatureTuples><br>
> + IRToNativeSizeLearning::TupleSorter;<br>
> +<br>
> +// This is a point-in-time choice: we determined that including these pairs of<br>
> +// consecutive instructions (in the IR layout available at inline time) as<br>
> +// features improves the model performance. We want to move away from manual<br>
> +// feature selection.<br>
> +// The vector is given as opcode pairs rather than labels because 1) labels<br>
> +// weren't readily available, and 2) the successions were hand-extracted.<br>
> +std::vector<std::pair<size_t, size_t>><br>
> + IRToNativeSizeLearning::FunctionFeatures::ImportantInstructionSuccessions =<br>
> + {{1, 34}, {15, 27}, {53, 53}, {53, 34}, {1, 11}, {32, 2}, {2, 48},<br>
> + {28, 48}, {1, 45}, {49, 32}, {57, 56}, {55, 53}, {1, 28}, {57, 34},<br>
> + {1, 1}, {32, 28}, {32, 15}, {49, 28}, {53, 1}, {2, 53}, {48, 34},<br>
> + {28, 53}, {2, 32}, {1, 40}, {32, 48}, {29, 56}, {56, 32}, {55, 56},<br>
> + {48, 56}, {1, 31}, {33, 34}, {2, 28}, {1, 12}, {55, 1}, {31, 31},<br>
> + {65, 1}, {33, 56}, {32, 32}, {13, 13}, {1, 26}, {13, 26}, {2, 1},<br>
> + {1, 33}, {47, 49}, {64, 1}, {2, 38}, {34, 53}, {48, 2}, {55, 34},<br>
> + {34, 32}, {1, 5}, {56, 13}, {2, 2}, {2, 49}, {33, 2}, {49, 39},<br>
> + {56, 49}, {33, 49}, {32, 39}, {39, 57}, {29, 33}, {31, 34}, {32, 29},<br>
> + {47, 15}, {13, 34}, {2, 33}, {32, 49}, {49, 34}, {56, 33}, {1, 30},<br>
> + {33, 33}, {31, 33}, {2, 29}, {56, 7}, {32, 13}, {2, 55}, {56, 56},<br>
> + {2, 34}, {1, 42}, {34, 49}, {1, 20}, {32, 33}, {1, 25}, {53, 28},<br>
> + {1, 14}, {31, 49}, {28, 2}, {2, 13}, {2, 56}, {1, 32}, {56, 53},<br>
> + {65, 65}, {33, 53}, {64, 64}, {13, 2}, {34, 33}, {1, 4}, {49, 2},<br>
> + {1, 9}, {56, 1}, {33, 1}, {53, 57}, {32, 53}, {13, 56}, {32, 56},<br>
> + {55, 55}, {1, 18}, {49, 56}, {34, 34}, {1, 7}, {56, 64}, {32, 1},<br>
> + {13, 33}, {55, 28}, {49, 33}, {57, 57}, {56, 34}, {34, 56}, {33, 32},<br>
> + {32, 40}, {1, 29}, {53, 2}, {34, 1}, {32, 34}, {49, 49}, {1, 24},<br>
> + {40, 34}, {1, 13}, {38, 34}, {29, 2}, {34, 2}, {1, 39}, {1, 22},<br>
> + {1, 27}, {49, 1}, {1, 8}, {56, 2}};<br>
> +<br>
> +// We have: 9 calculated features (the features here); 1 feature for each<br>
> +// instruction opcode; and 1 feature for each manually-identified sequence.<br>
> +// For the latter 2, we build a histogram: we count the number of<br>
> +// occurrences of each instruction opcode or succession of instructions,<br>
> +// respectively.<br>
> +// Note that instruction opcodes start from 1. For convenience, we also have an<br>
> +// always 0 feature for the '0' opcode, hence the extra 1.<br>
> +const size_t IRToNativeSizeLearning::FunctionFeatures::FeatureCount =<br>
> + IRToNativeSizeLearning::FunctionFeatures::ImportantInstructionSuccessions<br>
> + .size() +<br>
> + getMaxInstructionID() + 1 + IRToNativeSizeLearning::NumNamedFeatures;<br>
> +<br>
> +size_t getSize(Function &F, TargetTransformInfo &TTI) {<br>
> + size_t Ret = 0;<br>
> + for (auto &BB : F)<br>
> + for (auto &I : BB)<br>
> + Ret += TTI.getInstructionCost(<br>
> + &I, TargetTransformInfo::TargetCostKind::TCK_CodeSize);<br>
> + return Ret;<br>
> +}<br>
> +<br>
> +size_t getSize(Function &F, FunctionAnalysisManager &FAM) {<br>
> + auto &TTI = FAM.getResult<TargetIRAnalysis>(F);<br>
> + return getSize(F, TTI);<br>
> +}<br>
> +<br>
> +unsigned getMaxDominatorTreeDepth(const Function &F,<br>
> + const DominatorTree &Tree) {<br>
> + unsigned Ret = 0;<br>
> + for (auto &BB : F)<br>
> + if (auto *TN = Tree.getNode(&BB))<br>
> + Ret = std::max(Ret, TN->getLevel());<br>
> + return Ret;<br>
> +}<br>
> +} // namespace<br>
> +<br>
> +IRToNativeSizeLearning::FunctionFeatures<br>
> +IRToNativeSizeLearning::getFunctionFeatures(Function &F,<br>
> + FunctionAnalysisManager &FAM) {<br>
> + assert(ensureSortedTuples() && "expected lazy initialization");<br>
> +<br>
> + auto &DomTree = FAM.getResult<DominatorTreeAnalysis>(F);<br>
> + FunctionFeatures FF;<br>
> + size_t InstrCount = getMaxInstructionID() + 1;<br>
> + FF.InstructionHistogram.resize(InstrCount);<br>
> +<br>
> + FF.InstructionPairHistogram.resize(<br>
> + FunctionFeatures::ImportantInstructionSuccessions.size());<br>
> +<br>
> + auto StartID = 0;<br>
> + auto LastID = StartID;<br>
> + auto getPairIndex = [](size_t a, size_t b) {<br>
> + auto I =<br>
> + std::find(FunctionFeatures::ImportantInstructionSuccessions.begin(),<br>
> + FunctionFeatures::ImportantInstructionSuccessions.end(),<br>
> + std::make_pair(a, b));<br>
> + if (I == FunctionFeatures::ImportantInstructionSuccessions.end())<br>
> + return -1;<br>
> + return static_cast<int>(std::distance(<br>
> + FunctionFeatures::ImportantInstructionSuccessions.begin(), I));<br>
> + };<br>
> +<br>
> + // We don't want debug calls, because they'd just add noise.<br>
> + for (auto &BB : F) {<br>
> + for (auto I = BB.instructionsWithoutDebug().begin(),<br>
> + E = BB.instructionsWithoutDebug().end();<br>
> + I != E; ++I) {<br>
> + auto ID = I->getOpcode();<br>
> +<br>
> + ++FF.InstructionHistogram[ID];<br>
> + int PairIndex = getPairIndex(LastID, ID);<br>
> + if (PairIndex >= 0)<br>
> + ++FF.InstructionPairHistogram[PairIndex];<br>
> + LastID = ID;<br>
> + if (isa<CallBase>(*I))<br>
> + ++FF[NamedFeatureIndex::Calls];<br>
> + }<br>
> + }<br>
> +<br>
> + FF[NamedFeatureIndex::InitialSize] = getSize(F, FAM);<br>
> + FF[NamedFeatureIndex::IsLocal] = F.hasLocalLinkage();<br>
> + FF[NamedFeatureIndex::IsLinkOnceODR] = F.hasLinkOnceODRLinkage();<br>
> + FF[NamedFeatureIndex::IsLinkOnce] = F.hasLinkOnceLinkage();<br>
> + FF[NamedFeatureIndex::Blocks] =<br>
> + std::distance(F.getBasicBlockList().begin(), F.getBasicBlockList().end());<br>
> + auto &LI = FAM.getResult<LoopAnalysis>(F);<br>
> + FF[NamedFeatureIndex::Loops] = std::distance(LI.begin(), LI.end());<br>
> + for (auto &L : LI)<br>
> + FF[NamedFeatureIndex::MaxLoopDepth] =<br>
> + std::max(FF[NamedFeatureIndex::MaxLoopDepth],<br>
> + static_cast<int32_t>(L->getLoopDepth()));<br>
> + FF[NamedFeatureIndex::MaxDomTreeLevel] = getMaxDominatorTreeDepth(F, DomTree);<br>
> + return FF;<br>
> +}<br>
> +<br>
> +void IRToNativeSizeLearning::FunctionFeatures::fillTensor(int32_t *Ptr) const {<br>
> + std::copy(NamedFeatures.begin(), NamedFeatures.end(), Ptr);<br>
> + Ptr += NamedFeatures.size();<br>
> + std::copy(InstructionHistogram.begin(), InstructionHistogram.end(), Ptr);<br>
> + Ptr += InstructionHistogram.size();<br>
> + std::copy(InstructionPairHistogram.begin(), InstructionPairHistogram.end(),<br>
> + Ptr);<br>
> +}<br>
> +<br>
> +bool InlineSizeEstimatorAnalysis::isEvaluatorRequested() {<br>
> + return !TFIR2NativeModelPath.empty();<br>
> +}<br>
> +<br>
> +InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis() {<br>
> + if (!isEvaluatorRequested()) {<br>
> + return;<br>
> + }<br>
> + std::vector<std::string> InputNames{"serving_default_input_1"};<br>
> + std::vector<std::string> OutputName{"StatefulPartitionedCall"};<br>
> + Evaluator = std::make_unique<TFModelEvaluator>(<br>
> + TFIR2NativeModelPath.getValue().c_str(), InputNames, OutputName);<br>
> + if (!Evaluator || !Evaluator->isValid()) {<br>
> + Evaluator.reset();<br>
> + return;<br>
> + }<br>
> + static const std::vector<int64_t> Dim{<br>
> + 1, static_cast<int64_t>(<br>
> + IRToNativeSizeLearning::FunctionFeatures::FeatureCount)};<br>
> +<br>
> + Evaluator->initInput(0, TF_INT32, Dim);<br>
> +}<br>
> +<br>
> +InlineSizeEstimatorAnalysis::Result<br>
> +InlineSizeEstimatorAnalysis::run(const Function &F,<br>
> + FunctionAnalysisManager &FAM) {<br>
> + if (!Evaluator)<br>
> + return None;<br>
> + auto Features = IRToNativeSizeLearning::getFunctionFeatures(<br>
> + const_cast<Function &>(F), FAM);<br>
> + int32_t *V = static_cast<int32_t *>(TF_TensorData(Evaluator->getInput()[0]));<br>
> + Features.fillTensor(V);<br>
> + auto ER = Evaluator->evaluate();<br>
> + if (!ER)<br>
> + return None;<br>
> + float Ret = *ER->getTensorValue<float>(0);<br>
> + if (Ret < 0.0)<br>
> + Ret = 0.0;<br>
> + return static_cast<size_t>(Ret);<br>
> +}<br>
> +<br>
> +InlineSizeEstimatorAnalysis::~InlineSizeEstimatorAnalysis() {}<br>
> +InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis(<br>
> + InlineSizeEstimatorAnalysis &&Other)<br>
> + : Evaluator(std::move(Other.Evaluator)) {}<br>
> +<br>
> +#else<br>
> +namespace llvm {<br>
> +class TFModelEvaluator {};<br>
> +} // namespace llvm<br>
> +InlineSizeEstimatorAnalysis::InlineSizeEstimatorAnalysis() {}<br>
> +InlineSizeEstimatorAnalysis ::InlineSizeEstimatorAnalysis(<br>
> + InlineSizeEstimatorAnalysis &&) {}<br>
> +InlineSizeEstimatorAnalysis::~InlineSizeEstimatorAnalysis() {}<br>
> +InlineSizeEstimatorAnalysis::Result<br>
> +InlineSizeEstimatorAnalysis::run(const Function &F,<br>
> + FunctionAnalysisManager &FAM) {<br>
> + return None;<br>
> +}<br>
> +bool InlineSizeEstimatorAnalysis::isEvaluatorRequested() { return false; }<br>
> +#endif<br>
> \ No newline at end of file<br>
> <br>
> diff --git a/llvm/lib/Analysis/TFUtils.cpp b/llvm/lib/Analysis/TFUtils.cpp<br>
> new file mode 100644<br>
> index 000000000000..6cd5b5c9b4ea<br>
> --- /dev/null<br>
> +++ b/llvm/lib/Analysis/TFUtils.cpp<br>
> @@ -0,0 +1,143 @@<br>
> +//===- TFUtils.cpp - tensorflow evaluation utilities ----------------------===//<br>
> +//<br>
> +// The LLVM Compiler Infrastructure<br>
> +//<br>
> +// This file is distributed under the University of Illinois Open Source<br>
> +// License. See LICENSE.TXT for details.<br>
> +//<br>
> +//===----------------------------------------------------------------------===//<br>
> +//<br>
> +// This file implements utilities for interfacing with tensorflow C APIs.<br>
> +//<br>
> +//===----------------------------------------------------------------------===//<br>
> +<br>
> +#include "llvm/Analysis/Utils/TFUtils.h"<br>
> +#include "llvm/ADT/Twine.h"<br>
> +#include "llvm/Support/Debug.h"<br>
> +#include "llvm/Support/ManagedStatic.h"<br>
> +#include "llvm/Support/raw_ostream.h"<br>
> +<br>
> +#include "tensorflow/c/c_api_experimental.h"<br>
> +<br>
> +#include <cassert><br>
> +<br>
> +using namespace llvm;<br>
> +<br>
> +namespace {<br>
> +<br>
> +struct TFInitializer {<br>
> + TFInitializer() {<br>
> + assert(!IsInitialized && "TFInitialized should be called only once");<br>
> + int Argc = 1;<br>
> + const char *Name = "";<br>
> + const char **NamePtr = &Name;<br>
> + TF_InitMain(Name, &Argc, const_cast<char ***>(&NamePtr));<br>
> + IsInitialized = true;<br>
> + }<br>
> + bool IsInitialized = false;<br>
> +};<br>
> +<br>
> +llvm::ManagedStatic<TFInitializer> TFLibInitializer;<br>
> +<br>
> +bool ensureInitTF() { return TFLibInitializer->IsInitialized; }<br>
> +<br>
> +TFModelEvaluator::TFGraphPtr createTFGraph() {<br>
> + return TFModelEvaluator::TFGraphPtr(TF_NewGraph(), &TF_DeleteGraph);<br>
> +}<br>
> +<br>
> +TFModelEvaluator::TFStatusPtr createTFStatus() {<br>
> + return TFModelEvaluator::TFStatusPtr(TF_NewStatus(), &TF_DeleteStatus);<br>
> +}<br>
> +<br>
> +TFModelEvaluator::TFSessionOptionsPtr createTFSessionOptions() {<br>
> + return TFModelEvaluator::TFSessionOptionsPtr(TF_NewSessionOptions(),<br>
> + &TF_DeleteSessionOptions);<br>
> +}<br>
> +} // namespace<br>
> +<br>
> +TFModelEvaluator::TFModelEvaluator(StringRef SavedModelPath,<br>
> + const std::vector<std::string> &InputNames,<br>
> + const std::vector<std::string> &OutputNames,<br>
> + const char *Tags)<br>
> + : Graph(createTFGraph()), Options(createTFSessionOptions()),<br>
> + InputFeed(InputNames.size()), Input(InputNames.size()),<br>
> + OutputFeed(OutputNames.size()) {<br>
> + if (!ensureInitTF()) {<br>
> + errs() << "Tensorflow should have been initialized";<br>
> + return;<br>
> + }<br>
> + auto Status = createTFStatus();<br>
> +<br>
> + Session = TF_LoadSessionFromSavedModel(Options.get(), nullptr,<br>
> + SavedModelPath.str().c_str(), &Tags, 1,<br>
> + Graph.get(), nullptr, Status.get());<br>
> + if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {<br>
> + errs() << TF_Message(Status.get());<br>
> + deleteSession();<br>
> + }<br>
> + for (size_t I = 0; I < InputNames.size(); ++I) {<br>
> + InputFeed[I] = {<br>
> + TF_GraphOperationByName(Graph.get(), (InputNames[I]).c_str()), 0};<br>
> + if (!checkReportAndReset(InputFeed[I], InputNames[I]))<br>
> + return;<br>
> + }<br>
> + for (size_t I = 0; I < OutputNames.size(); ++I) {<br>
> + OutputFeed[I] = {<br>
> + TF_GraphOperationByName(Graph.get(), (OutputNames[I]).c_str()), 0};<br>
> + if (!checkReportAndReset(OutputFeed[I], OutputNames[I]))<br>
> + return;<br>
> + }<br>
> +}<br>
> +<br>
> +TFModelEvaluator::~TFModelEvaluator() {<br>
> + for (auto *T : Input) {<br>
> + TF_DeleteTensor(T);<br>
> + }<br>
> + deleteSession();<br>
> +}<br>
> +<br>
> +bool TFModelEvaluator::checkReportAndReset(const TF_Output &Output,<br>
> + StringRef Name) {<br>
> + if (Output.oper)<br>
> + return true;<br>
> + errs() << "Could not find TF_Output named: " + Name;<br>
> + deleteSession();<br>
> + return false;<br>
> +}<br>
> +<br>
> +void TFModelEvaluator::deleteSession() {<br>
> + if (Session == nullptr)<br>
> + return;<br>
> + auto Status = createTFStatus();<br>
> + TF_DeleteSession(Session, Status.get());<br>
> + Session = nullptr;<br>
> + if (TF_GetCode(Status.get()) != TF_Code::TF_OK)<br>
> + errs() << "Could not delete TF session";<br>
> +}<br>
> +<br>
> +Optional<TFModelEvaluator::EvaluationResult> TFModelEvaluator::evaluate() {<br>
> + if (!isValid())<br>
> + return None;<br>
> + EvaluationResult Ret(OutputFeed.size());<br>
> + auto Status = createTFStatus();<br>
> + TF_SessionRun(Session, nullptr, InputFeed.data(), Input.data(), Input.size(),<br>
> + OutputFeed.data(), Ret.Output.data(), Ret.Output.size(),<br>
> + nullptr, 0, nullptr, Status.get());<br>
> + if (TF_GetCode(Status.get()) != TF_Code::TF_OK) {<br>
> + errs() << TF_Message(Status.get());<br>
> + deleteSession();<br>
> + return None;<br>
> + }<br>
> + return Ret;<br>
> +}<br>
> +<br>
> +void TFModelEvaluator::initInput(int Index, TF_DataType Type,<br>
> + const std::vector<int64_t> &Dimensions) {<br>
> + int64_t TotalSize = TF_DataTypeSize(Type);<br>
> + for (auto &D : Dimensions)<br>
> + TotalSize *= D;<br>
> +<br>
> + Input[Index] =<br>
> + TF_AllocateTensor(Type, Dimensions.data(), Dimensions.size(), TotalSize);<br>
> + std::memset(TF_TensorData(Input[Index]), 0, TotalSize);<br>
> +}<br>
> \ No newline at end of file<br>
> <br>
> diff --git a/llvm/lib/Passes/PassBuilder.cpp b/llvm/lib/Passes/PassBuilder.cpp<br>
> index 53158e7aabab..537d300fee55 100644<br>
> --- a/llvm/lib/Passes/PassBuilder.cpp<br>
> +++ b/llvm/lib/Passes/PassBuilder.cpp<br>
> @@ -35,6 +35,7 @@<br>
> #include "llvm/Analysis/IVUsers.h"<br>
> #include "llvm/Analysis/InlineAdvisor.h"<br>
> #include "llvm/Analysis/InlineFeaturesAnalysis.h"<br>
> +#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"<br>
> #include "llvm/Analysis/LazyCallGraph.h"<br>
> #include "llvm/Analysis/LazyValueInfo.h"<br>
> #include "llvm/Analysis/LoopAccessAnalysis.h"<br>
> <br>
> diff --git a/llvm/lib/Passes/PassRegistry.def b/llvm/lib/Passes/PassRegistry.def<br>
> index eb2b740db561..dfdfc3d05976 100644<br>
> --- a/llvm/lib/Passes/PassRegistry.def<br>
> +++ b/llvm/lib/Passes/PassRegistry.def<br>
> @@ -133,6 +133,7 @@ FUNCTION_ANALYSIS("loops", LoopAnalysis())<br>
> FUNCTION_ANALYSIS("lazy-value-info", LazyValueAnalysis())<br>
> FUNCTION_ANALYSIS("da", DependenceAnalysis())<br>
> FUNCTION_ANALYSIS("inliner-features", InlineFeaturesAnalysis())<br>
> +FUNCTION_ANALYSIS("inliner-size-estimator", InlineSizeEstimatorAnalysis())<br>
> FUNCTION_ANALYSIS("memdep", MemoryDependenceAnalysis())<br>
> FUNCTION_ANALYSIS("memoryssa", MemorySSAAnalysis())<br>
> FUNCTION_ANALYSIS("phi-values", PhiValuesAnalysis())<br>
> <br>
> diff --git a/llvm/unittests/Analysis/CMakeLists.txt b/llvm/unittests/Analysis/CMakeLists.txt<br>
> index 42f7dd3c0610..59ad444d32fb 100644<br>
> --- a/llvm/unittests/Analysis/CMakeLists.txt<br>
> +++ b/llvm/unittests/Analysis/CMakeLists.txt<br>
> @@ -6,7 +6,13 @@ set(LLVM_LINK_COMPONENTS<br>
> TransformUtils<br>
> )<br>
> <br>
> -add_llvm_unittest(AnalysisTests<br>
> +if (DEFINED LLVM_HAVE_TF_API)<br>
> + LIST(APPEND EXTRA_TESTS TFUtilsTest.cpp)<br>
> +else()<br>
> + LIST(APPEND LLVM_OPTIONAL_SOURCES TFUtilsTest.cpp)<br>
> +endif()<br>
> +<br>
> +add_llvm_unittest_with_input_files(AnalysisTests<br>
> AliasAnalysisTest.cpp<br>
> AliasSetTrackerTest.cpp<br>
> AssumeBundleQueriesTest.cpp<br>
> @@ -22,6 +28,7 @@ add_llvm_unittest(AnalysisTests<br>
> DomTreeUpdaterTest.cpp<br>
> GlobalsModRefTest.cpp<br>
> InlineFeaturesAnalysisTest.cpp<br>
> + InlineSizeEstimatorAnalysisTest.cpp<br>
> IVDescriptorsTest.cpp<br>
> LazyCallGraphTest.cpp<br>
> LoadsTest.cpp<br>
> @@ -40,4 +47,7 @@ add_llvm_unittest(AnalysisTests<br>
> ValueLatticeTest.cpp<br>
> ValueTrackingTest.cpp<br>
> VectorUtilsTest.cpp<br>
> + ${EXTRA_TESTS}<br>
> )<br>
> +<br>
> + target_link_libraries(AnalysisTests PRIVATE LLVMTestingSupport)<br>
> <br>
> diff --git a/llvm/unittests/Analysis/InlineSizeEstimatorAnalysisTest.cpp b/llvm/unittests/Analysis/InlineSizeEstimatorAnalysisTest.cpp<br>
> new file mode 100644<br>
> index 000000000000..377590be016a<br>
> --- /dev/null<br>
> +++ b/llvm/unittests/Analysis/InlineSizeEstimatorAnalysisTest.cpp<br>
> @@ -0,0 +1,101 @@<br>
> +//===- InlineSizeEstimatorAnalysisTest.cpp - test for ir2native -----------===//<br>
> +//<br>
> +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.<br>
> +// See <a href="https://llvm.org/LICENSE.txt" rel="noreferrer noreferrer" target="_blank">https://llvm.org/LICENSE.txt</a> for license information.<br>
> +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception<br>
> +//<br>
> +//===----------------------------------------------------------------------===//<br>
> +<br>
> +#include "llvm/Analysis/InlineSizeEstimatorAnalysis.h"<br>
> +#include "llvm/Analysis/LoopInfo.h"<br>
> +#include "llvm/Analysis/TargetLibraryInfo.h"<br>
> +#include "llvm/Analysis/TargetTransformInfo.h"<br>
> +#include "llvm/AsmParser/Parser.h"<br>
> +#include "llvm/IR/Dominators.h"<br>
> +#include "llvm/IR/Instructions.h"<br>
> +#include "llvm/IR/LLVMContext.h"<br>
> +#include "llvm/IR/Module.h"<br>
> +#include "llvm/Support/CommandLine.h"<br>
> +#include "llvm/Support/Path.h"<br>
> +#include "llvm/Support/SourceMgr.h"<br>
> +#include "llvm/Testing/Support/SupportHelpers.h"<br>
> +#include "gtest/gtest.h"<br>
> +<br>
> +using namespace llvm;<br>
> +<br>
> +extern const char *TestMainArgv0;<br>
> +extern cl::opt<std::string> TFIR2NativeModelPath;<br>
> +<br>
> +#if LLVM_HAVE_TF_API<br>
> +static std::string getModelPath() {<br>
> + SmallString<128> InputsDir = unittest::getInputFileDirectory(TestMainArgv0);<br>
> + llvm::sys::path::append(InputsDir, "ir2native_x86_64_model");<br>
> + return std::string(InputsDir);<br>
> +}<br>
> +#endif<br>
> +<br>
> +static std::unique_ptr<Module> parseIR(LLVMContext &C, const char *IR) {<br>
> + SMDiagnostic Err;<br>
> + std::unique_ptr<Module> Mod = parseAssemblyString(IR, Err, C);<br>
> + if (!Mod)<br>
> + Err.print("MLAnalysisTests", errs());<br>
> + return Mod;<br>
> +}<br>
> +<br>
> +static FunctionAnalysisManager buildFAM() {<br>
> + FunctionAnalysisManager FAM;<br>
> + FAM.registerPass([&] { return DominatorTreeAnalysis(); });<br>
> + FAM.registerPass([&] { return PassInstrumentationAnalysis(); });<br>
> + FAM.registerPass([&] { return TargetIRAnalysis(); });<br>
> + FAM.registerPass([&] { return LoopAnalysis(); });<br>
> + return FAM;<br>
> +}<br>
> +<br>
> +// Test model loading and evaluation.<br>
> +TEST(InlineSizeEstimatorAnalysis, SizeIsValidTest) {<br>
> + LLVMContext C;<br>
> + std::unique_ptr<Module> M = parseIR(C,<br>
> + R"IR(<br>
> +target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"<br>
> +target triple = "x86_64-pc-linux-gnu"<br>
> +<br>
> +declare i32 @f1(i32)<br>
> +declare i32 @f2(i32)<br>
> +<br>
> +define i32 @branches(i32) {<br>
> + %cond = icmp slt i32 %0, 3<br>
> + br i1 %cond, label %then, label %else<br>
> +<br>
> +then:<br>
> + %ret.1 = call i32 @f1(i32 %0)<br>
> + br label %last.block<br>
> +<br>
> +else:<br>
> + %ret.2 = call i32 @f2(i32 %0)<br>
> + br label %last.block<br>
> +<br>
> +last.block:<br>
> + %ret = phi i32 [%ret.1, %then], [%ret.2, %else]<br>
> + ret i32 %ret<br>
> +}<br>
> +<br>
> +define internal i32 @top() {<br>
> + %1 = call i32 @branches(i32 2)<br>
> + %2 = call i32 @f1(i32 %1)<br>
> + ret i32 %2<br>
> +}<br>
> +)IR");<br>
> +<br>
> + FunctionAnalysisManager FAM = buildFAM();<br>
> +#if LLVM_HAVE_TF_API<br>
> + TFIR2NativeModelPath = getModelPath();<br>
> +#endif<br>
> +<br>
> + InlineSizeEstimatorAnalysis FA;<br>
> + auto SizeEstimate = FA.run(*M->getFunction("branches"), FAM);<br>
> +#if LLVM_HAVE_TF_API<br>
> + EXPECT_GT(*SizeEstimate, 0);<br>
> +#else<br>
> + EXPECT_FALSE(SizeEstimate.hasValue());<br>
> +#endif<br>
> +}<br>
> <br>
> diff --git a/llvm/unittests/Analysis/Inputs/ir2native_x86_64_model/saved_model.pbtxt b/llvm/unittests/Analysis/Inputs/ir2native_x86_64_model/saved_model.pbtxt<br>
> new file mode 100644<br>
> index 000000000000..6efdad51083d<br>
> --- /dev/null<br>
> +++ b/llvm/unittests/Analysis/Inputs/ir2native_x86_64_model/saved_model.pbtxt<br>
> @@ -0,0 +1,10596 @@<br>
> +saved_model_schema_version: 1<br>
> +meta_graphs {<br>
> + meta_info_def {<br>
> + stripped_op_list {<br>
> + op {<br>
> + name: "Const"<br>
> + output_arg {<br>
> + name: "output"<br>
> + type_attr: "dtype"<br>
> + }<br>
> + attr {<br>
> + name: "value"<br>
> + type: "tensor"<br>
> + }<br>
> + attr {<br>
> + name: "dtype"<br>
> + type: "type"<br>
> + }<br>
> + }<br>
> + op {<br>
> + name: "NoOp"<br>
> + }<br>
> + op {<br>
> + name: "Placeholder"<br>
> + output_arg {<br>
> + name: "output"<br>
> + type_attr: "dtype"<br>
> + }<br>
> + attr {<br>
> + name: "dtype"<br>
> + type: "type"<br>
> + }<br>
> + attr {<br>
> + name: "shape"<br>
> + type: "shape"<br>
> + default_value {<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + op {<br>
> + name: "ReadVariableOp"<br>
> + input_arg {<br>
> + name: "resource"<br>
> + type: DT_RESOURCE<br>
> + }<br>
> + output_arg {<br>
> + name: "value"<br>
> + type_attr: "dtype"<br>
> + }<br>
> + attr {<br>
> + name: "dtype"<br>
> + type: "type"<br>
> + }<br>
> + is_stateful: true<br>
> + }<br>
> + op {<br>
> + name: "StatefulPartitionedCall"<br>
> + input_arg {<br>
> + name: "args"<br>
> + type_list_attr: "Tin"<br>
> + }<br>
> + output_arg {<br>
> + name: "output"<br>
> + type_list_attr: "Tout"<br>
> + }<br>
> + attr {<br>
> + name: "Tin"<br>
> + type: "list(type)"<br>
> + has_minimum: true<br>
> + }<br>
> + attr {<br>
> + name: "Tout"<br>
> + type: "list(type)"<br>
> + has_minimum: true<br>
> + }<br>
> + attr {<br>
> + name: "f"<br>
> + type: "func"<br>
> + }<br>
> + attr {<br>
> + name: "config"<br>
> + type: "string"<br>
> + default_value {<br>
> + s: ""<br>
> + }<br>
> + }<br>
> + attr {<br>
> + name: "config_proto"<br>
> + type: "string"<br>
> + default_value {<br>
> + s: ""<br>
> + }<br>
> + }<br>
> + attr {<br>
> + name: "executor_type"<br>
> + type: "string"<br>
> + default_value {<br>
> + s: ""<br>
> + }<br>
> + }<br>
> + is_stateful: true<br>
> + }<br>
> + op {<br>
> + name: "VarHandleOp"<br>
> + output_arg {<br>
> + name: "resource"<br>
> + type: DT_RESOURCE<br>
> + }<br>
> + attr {<br>
> + name: "container"<br>
> + type: "string"<br>
> + default_value {<br>
> + s: ""<br>
> + }<br>
> + }<br>
> + attr {<br>
> + name: "shared_name"<br>
> + type: "string"<br>
> + default_value {<br>
> + s: ""<br>
> + }<br>
> + }<br>
> + attr {<br>
> + name: "dtype"<br>
> + type: "type"<br>
> + }<br>
> + attr {<br>
> + name: "shape"<br>
> + type: "shape"<br>
> + }<br>
> + is_stateful: true<br>
> + }<br>
> + }<br>
> + tags: "serve"<br>
> + tensorflow_version: "1.15.0"<br>
> + tensorflow_git_version: "unknown"<br>
> + stripped_default_attrs: true<br>
> + }<br>
> + graph_def {<br>
> + node {<br>
> + name: "dense/kernel"<br>
> + op: "VarHandleOp"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shape"<br>
> + value {<br>
> + shape {<br>
> + dim {<br>
> + size: 214<br>
> + }<br>
> + dim {<br>
> + size: 100<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shared_name"<br>
> + value {<br>
> + s: "dense/kernel"<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "dense/kernel/Read/ReadVariableOp"<br>
> + op: "ReadVariableOp"<br>
> + input: "dense/kernel"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + dim {<br>
> + size: 214<br>
> + }<br>
> + dim {<br>
> + size: 100<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "dense/bias"<br>
> + op: "VarHandleOp"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shape"<br>
> + value {<br>
> + shape {<br>
> + dim {<br>
> + size: 100<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shared_name"<br>
> + value {<br>
> + s: "dense/bias"<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "dense/bias/Read/ReadVariableOp"<br>
> + op: "ReadVariableOp"<br>
> + input: "dense/bias"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + dim {<br>
> + size: 100<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "dense_1/kernel"<br>
> + op: "VarHandleOp"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shape"<br>
> + value {<br>
> + shape {<br>
> + dim {<br>
> + size: 100<br>
> + }<br>
> + dim {<br>
> + size: 1<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shared_name"<br>
> + value {<br>
> + s: "dense_1/kernel"<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "dense_1/kernel/Read/ReadVariableOp"<br>
> + op: "ReadVariableOp"<br>
> + input: "dense_1/kernel"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + dim {<br>
> + size: 100<br>
> + }<br>
> + dim {<br>
> + size: 1<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "dense_1/bias"<br>
> + op: "VarHandleOp"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shape"<br>
> + value {<br>
> + shape {<br>
> + dim {<br>
> + size: 1<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shared_name"<br>
> + value {<br>
> + s: "dense_1/bias"<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "dense_1/bias/Read/ReadVariableOp"<br>
> + op: "ReadVariableOp"<br>
> + input: "dense_1/bias"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + dim {<br>
> + size: 1<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "total"<br>
> + op: "VarHandleOp"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shape"<br>
> + value {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shared_name"<br>
> + value {<br>
> + s: "total"<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "total/Read/ReadVariableOp"<br>
> + op: "ReadVariableOp"<br>
> + input: "total"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "count"<br>
> + op: "VarHandleOp"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shape"<br>
> + value {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shared_name"<br>
> + value {<br>
> + s: "count"<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "count/Read/ReadVariableOp"<br>
> + op: "ReadVariableOp"<br>
> + input: "count"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "total_1"<br>
> + op: "VarHandleOp"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shape"<br>
> + value {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shared_name"<br>
> + value {<br>
> + s: "total_1"<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "total_1/Read/ReadVariableOp"<br>
> + op: "ReadVariableOp"<br>
> + input: "total_1"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "count_1"<br>
> + op: "VarHandleOp"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shape"<br>
> + value {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shared_name"<br>
> + value {<br>
> + s: "count_1"<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "count_1/Read/ReadVariableOp"<br>
> + op: "ReadVariableOp"<br>
> + input: "count_1"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "NoOp"<br>
> + op: "NoOp"<br>
> + }<br>
> + node {<br>
> + name: "Const"<br>
> + op: "Const"<br>
> + device: "/device:CPU:0"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_STRING<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "value"<br>
> + value {<br>
> + tensor {<br>
> + dtype: DT_STRING<br>
> + tensor_shape {<br>
> + }<br>
> + string_val: "\n\277\001\n\030\010\001\022\024layer_with_weights-0\n\013\010\001\022\007layer-0\n\030\010\002\022\024layer_with_weights-1\n\013\010\002\022\007layer-1\n\r\010\003\022\toptimizer\n\031\010\004\022\025regularization_losses\n\r\010\005\022\tvariables\n\027\010\006\022\023trainable_variables\n\r\010\007\022\tkeras_api\n\016\010\010\022\nsignatures\nh\n\n\010\t\022\006kernel\n\010\010\n\022\004bias\n\031\010\013\022\025regularization_losses\n\r\010\014\022\tvariables\n\027\010\r\022\023trainable_variables\n\r\010\016\022\tkeras_api\nh\n\n\010\017\022\006kernel\n\010\010\020\022\004bias\n\031\010\021\022\025regularization_losses\n\r\010\022\022\tvariables\n\027\010\023\022\023trainable_variables\n\r\010\024\022\tkeras_api\n\000\n\000\n\034\n\005\010\t\022\0010\n\005\010\n\022\0011\n\005\010\017\022\0012\n\005\010\020\022\0013\n\034\n\005\010\t\022\0010\n\005\010\n\022\0011\n\005\010\017\022\0012\n\005\010\020\022\0013\n\255\001\n\n\010\025\022\006layers\n\037\010\026\022\033layer_regularization_losses\n\033\010\027\022\027non_trainable_variables\n\021\010\030\022\rlayer_metrics\n\031\010\004\022\025regularization_losses\n\013\010\031\022\007metrics\n\r\010\005\022\tvariables\n\027\010\006\022\023trainable_variables\n\000\nX\022V\n\016VARIABLE_VALUE\022\014dense/kernel\0326layer_with_weights-0/kernel/.ATTRIBUTES/VARIABLE_VALUE\nT\022R\n\016VARIABLE_VALUE\022\ndense/bias\0324layer_with_weights-0/bias/.ATTRIBUTES/VARIABLE_VALUE\n\000\n\016\n\005\010\t\022\0010\n\005\010\n\022\0011\n\016\n\005\010\t\022\0010\n\005\010\n\022\0011\n\255\001\n\n\010\032\022\006layers\n\037\010\033\022\033layer_regularization_losses\n\033\010\034\022\027non_trainable_variables\n\021\010\035\022\rlayer_metrics\n\031\010\013\022\025regularization_losses\n\013\010\036\022\007metrics\n\r\010\014\022\tvariables\n\027\010\r\022\023trainable_variables\nZ\022X\n\016VARIABLE_VALUE\022\016dense_1/kernel\0326layer_with_weights-1/kernel/.ATTRIBUTES/VARIABLE_VALUE\nV\022T\n\016VARIABLE_VALUE\022\014dense_1/bias\0324layer_with_weights-1/bias/.ATTRIBUTES/VARIABLE_VALUE\n\000\n\016\n\005\010\017\022\0010\n\005\010\020\022\0011\n\016\n\005\010\017\022\0010\n\005\010\020\022\0011\n\255\001\n\n\010\037\022\006layers\n\037\010 \022\033layer_regularization_losses\n\033\010!\022\027non_trainable_variables\n\021\010\"\022\rlayer_metrics\n\031\010\021\022\025regularization_losses\n\013\010#\022\007metrics\n\r\010\022\022\tvariables\n\027\010\023\022\023trainable_variables\n\016\n\005\010\001\022\0010\n\005\010\002\022\0011\n\000\n\000\n\000\n\016\n\005\010$\022\0010\n\005\010%\022\0011\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n\000\n4\n\t\010&\022\005total\n\t\010\'\022\005count\n\r\010(\022\tvariables\n\r\010)\022\tkeras_api\nD\n\t\010*\022\005total\n\t\010+\022\005count\n\016\010,\022\n_fn_kwargs\n\r\010-\022\tvariables\n\r\010.\022\tkeras_api\nO\022M\n\016VARIABLE_VALUE\022\005total\0324keras_api/metrics/0/total/.ATTRIBUTES/VARIABLE_VALUE\nO\022M\n\016VARIABLE_VALUE\022\005count\0324keras_api/metrics/0/count/.ATTRIBUTES/VARIABLE_VALUE\n\016\n\005\010&\022\0010\n\005\010\'\022\0011\n\017\n\r\010(\022\tvariables\nQ\022O\n\016VARIABLE_VALUE\022\007total_1\0324keras_api/metrics/1/total/.ATTRIBUTES/VARIABLE_VALUE\nQ\022O\n\016VARIABLE_VALUE\022\007count_1\0324keras_api/metrics/1/count/.ATTRIBUTES/VARIABLE_VALUE\n\000\n\016\n\005\010*\022\0010\n\005\010+\022\0011\n\017\n\r\010-\022\tvariables"<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "serving_default_input_1"<br>
> + op: "Placeholder"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + dim {<br>
> + size: -1<br>
> + }<br>
> + dim {<br>
> + size: 214<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_INT32<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shape"<br>
> + value {<br>
> + shape {<br>
> + dim {<br>
> + size: -1<br>
> + }<br>
> + dim {<br>
> + size: 214<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "StatefulPartitionedCall"<br>
> + op: "StatefulPartitionedCall"<br>
> + input: "serving_default_input_1"<br>
> + input: "dense/kernel"<br>
> + input: "dense/bias"<br>
> + input: "dense_1/kernel"<br>
> + input: "dense_1/bias"<br>
> + attr {<br>
> + key: "Tin"<br>
> + value {<br>
> + list {<br>
> + type: DT_INT32<br>
> + type: DT_RESOURCE<br>
> + type: DT_RESOURCE<br>
> + type: DT_RESOURCE<br>
> + type: DT_RESOURCE<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "Tout"<br>
> + value {<br>
> + list {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_collective_manager_ids"<br>
> + value {<br>
> + list {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + dim {<br>
> + size: -1<br>
> + }<br>
> + dim {<br>
> + size: 1<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_read_only_resource_inputs"<br>
> + value {<br>
> + list {<br>
> + i: 1<br>
> + i: 2<br>
> + i: 3<br>
> + i: 4<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "config_proto"<br>
> + value {<br>
> + s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0002\002J\0008\001"<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "f"<br>
> + value {<br>
> + func {<br>
> + name: "__inference_signature_wrapper_6671"<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "saver_filename"<br>
> + op: "Placeholder"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_STRING<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "shape"<br>
> + value {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "StatefulPartitionedCall_1"<br>
> + op: "StatefulPartitionedCall"<br>
> + input: "saver_filename"<br>
> + input: "dense/kernel/Read/ReadVariableOp"<br>
> + input: "dense/bias/Read/ReadVariableOp"<br>
> + input: "dense_1/kernel/Read/ReadVariableOp"<br>
> + input: "dense_1/bias/Read/ReadVariableOp"<br>
> + input: "total/Read/ReadVariableOp"<br>
> + input: "count/Read/ReadVariableOp"<br>
> + input: "total_1/Read/ReadVariableOp"<br>
> + input: "count_1/Read/ReadVariableOp"<br>
> + input: "Const"<br>
> + attr {<br>
> + key: "Tin"<br>
> + value {<br>
> + list {<br>
> + type: DT_STRING<br>
> + type: DT_FLOAT<br>
> + type: DT_FLOAT<br>
> + type: DT_FLOAT<br>
> + type: DT_FLOAT<br>
> + type: DT_FLOAT<br>
> + type: DT_FLOAT<br>
> + type: DT_FLOAT<br>
> + type: DT_FLOAT<br>
> + type: DT_STRING<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "Tout"<br>
> + value {<br>
> + list {<br>
> + type: DT_STRING<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_collective_manager_ids"<br>
> + value {<br>
> + list {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_read_only_resource_inputs"<br>
> + value {<br>
> + list {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "config_proto"<br>
> + value {<br>
> + s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0002\002J\0008\001"<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "f"<br>
> + value {<br>
> + func {<br>
> + name: "__inference__traced_save_6824"<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + node {<br>
> + name: "StatefulPartitionedCall_2"<br>
> + op: "StatefulPartitionedCall"<br>
> + input: "saver_filename"<br>
> + input: "dense/kernel"<br>
> + input: "dense/bias"<br>
> + input: "dense_1/kernel"<br>
> + input: "dense_1/bias"<br>
> + input: "total"<br>
> + input: "count"<br>
> + input: "total_1"<br>
> + input: "count_1"<br>
> + attr {<br>
> + key: "Tin"<br>
> + value {<br>
> + list {<br>
> + type: DT_STRING<br>
> + type: DT_RESOURCE<br>
> + type: DT_RESOURCE<br>
> + type: DT_RESOURCE<br>
> + type: DT_RESOURCE<br>
> + type: DT_RESOURCE<br>
> + type: DT_RESOURCE<br>
> + type: DT_RESOURCE<br>
> + type: DT_RESOURCE<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "Tout"<br>
> + value {<br>
> + list {<br>
> + type: DT_STRING<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_collective_manager_ids"<br>
> + value {<br>
> + list {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_read_only_resource_inputs"<br>
> + value {<br>
> + list {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "config_proto"<br>
> + value {<br>
> + s: "\n\007\n\003CPU\020\001\n\007\n\003GPU\020\0002\002J\0008\001"<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "f"<br>
> + value {<br>
> + func {<br>
> + name: "__inference__traced_restore_6860"<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + library {<br>
> + function {<br>
> + signature {<br>
> + name: "__inference__traced_restore_6860"<br>
> + input_arg {<br>
> + name: "file_prefix"<br>
> + type: DT_STRING<br>
> + }<br>
> + input_arg {<br>
> + name: "assignvariableop_dense_kernel"<br>
> + type: DT_RESOURCE<br>
> + }<br>
> + input_arg {<br>
> + name: "assignvariableop_1_dense_bias"<br>
> + type: DT_RESOURCE<br>
> + }<br>
> + input_arg {<br>
> + name: "assignvariableop_2_dense_1_kernel"<br>
> + type: DT_RESOURCE<br>
> + }<br>
> + input_arg {<br>
> + name: "assignvariableop_3_dense_1_bias"<br>
> + type: DT_RESOURCE<br>
> + }<br>
> + input_arg {<br>
> + name: "assignvariableop_4_total"<br>
> + type: DT_RESOURCE<br>
> + }<br>
> + input_arg {<br>
> + name: "assignvariableop_5_count"<br>
> + type: DT_RESOURCE<br>
> + }<br>
> + input_arg {<br>
> + name: "assignvariableop_6_total_1"<br>
> + type: DT_RESOURCE<br>
> + }<br>
> + input_arg {<br>
> + name: "assignvariableop_7_count_1"<br>
> + type: DT_RESOURCE<br>
> + }<br>
> + output_arg {<br>
> + name: "identity_9"<br>
> + type: DT_STRING<br>
> + }<br>
> + is_stateful: true<br>
> + control_output: "AssignVariableOp"<br>
> + control_output: "AssignVariableOp_1"<br>
> + control_output: "AssignVariableOp_2"<br>
> + control_output: "AssignVariableOp_3"<br>
> + control_output: "AssignVariableOp_4"<br>
> + control_output: "AssignVariableOp_5"<br>
> + control_output: "AssignVariableOp_6"<br>
> + control_output: "AssignVariableOp_7"<br>
> + control_output: "RestoreV2"<br>
> + control_output: "RestoreV2_1"<br>
> + }<br>
> + node_def {<br>
> + name: "RestoreV2/tensor_names"<br>
> + op: "Const"<br>
> + device: "/device:CPU:0"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + dim {<br>
> + size: 8<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_STRING<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "value"<br>
> + value {<br>
> + tensor {<br>
> + dtype: DT_STRING<br>
> + tensor_shape {<br>
> + dim {<br>
> + size: 8<br>
> + }<br>
> + }<br>
> + string_val: "layer_with_weights-0/kernel/.ATTRIBUTES/VARIABLE_VALUE"<br>
> + string_val: "layer_with_weights-0/bias/.ATTRIBUTES/VARIABLE_VALUE"<br>
> + string_val: "layer_with_weights-1/kernel/.ATTRIBUTES/VARIABLE_VALUE"<br>
> + string_val: "layer_with_weights-1/bias/.ATTRIBUTES/VARIABLE_VALUE"<br>
> + string_val: "keras_api/metrics/0/total/.ATTRIBUTES/VARIABLE_VALUE"<br>
> + string_val: "keras_api/metrics/0/count/.ATTRIBUTES/VARIABLE_VALUE"<br>
> + string_val: "keras_api/metrics/1/total/.ATTRIBUTES/VARIABLE_VALUE"<br>
> + string_val: "keras_api/metrics/1/count/.ATTRIBUTES/VARIABLE_VALUE"<br>
> + }<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "RestoreV2/tensor_names"<br>
> + }<br>
> + }<br>
> + node_def {<br>
> + name: "RestoreV2/shape_and_slices"<br>
> + op: "Const"<br>
> + device: "/device:CPU:0"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + dim {<br>
> + size: 8<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_STRING<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "value"<br>
> + value {<br>
> + tensor {<br>
> + dtype: DT_STRING<br>
> + tensor_shape {<br>
> + dim {<br>
> + size: 8<br>
> + }<br>
> + }<br>
> + string_val: ""<br>
> + string_val: ""<br>
> + string_val: ""<br>
> + string_val: ""<br>
> + string_val: ""<br>
> + string_val: ""<br>
> + string_val: ""<br>
> + string_val: ""<br>
> + }<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "RestoreV2/shape_and_slices"<br>
> + }<br>
> + }<br>
> + node_def {<br>
> + name: "RestoreV2"<br>
> + op: "RestoreV2"<br>
> + input: "file_prefix"<br>
> + input: "RestoreV2/tensor_names:output:0"<br>
> + input: "RestoreV2/shape_and_slices:output:0"<br>
> + device: "/device:CPU:0"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtypes"<br>
> + value {<br>
> + list {<br>
> + type: DT_FLOAT<br>
> + type: DT_FLOAT<br>
> + type: DT_FLOAT<br>
> + type: DT_FLOAT<br>
> + type: DT_FLOAT<br>
> + type: DT_FLOAT<br>
> + type: DT_FLOAT<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "RestoreV2"<br>
> + }<br>
> + }<br>
> + node_def {<br>
> + name: "Identity"<br>
> + op: "Identity"<br>
> + input: "RestoreV2:tensors:0"<br>
> + attr {<br>
> + key: "T"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "Identity"<br>
> + }<br>
> + }<br>
> + node_def {<br>
> + name: "AssignVariableOp"<br>
> + op: "AssignVariableOp"<br>
> + input: "assignvariableop_dense_kernel"<br>
> + input: "Identity:output:0"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "AssignVariableOp"<br>
> + }<br>
> + }<br>
> + node_def {<br>
> + name: "Identity_1"<br>
> + op: "Identity"<br>
> + input: "RestoreV2:tensors:1"<br>
> + attr {<br>
> + key: "T"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "Identity_1"<br>
> + }<br>
> + }<br>
> + node_def {<br>
> + name: "AssignVariableOp_1"<br>
> + op: "AssignVariableOp"<br>
> + input: "assignvariableop_1_dense_bias"<br>
> + input: "Identity_1:output:0"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "AssignVariableOp_1"<br>
> + }<br>
> + }<br>
> + node_def {<br>
> + name: "Identity_2"<br>
> + op: "Identity"<br>
> + input: "RestoreV2:tensors:2"<br>
> + attr {<br>
> + key: "T"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "Identity_2"<br>
> + }<br>
> + }<br>
> + node_def {<br>
> + name: "AssignVariableOp_2"<br>
> + op: "AssignVariableOp"<br>
> + input: "assignvariableop_2_dense_1_kernel"<br>
> + input: "Identity_2:output:0"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "AssignVariableOp_2"<br>
> + }<br>
> + }<br>
> + node_def {<br>
> + name: "Identity_3"<br>
> + op: "Identity"<br>
> + input: "RestoreV2:tensors:3"<br>
> + attr {<br>
> + key: "T"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "Identity_3"<br>
> + }<br>
> + }<br>
> + node_def {<br>
> + name: "AssignVariableOp_3"<br>
> + op: "AssignVariableOp"<br>
> + input: "assignvariableop_3_dense_1_bias"<br>
> + input: "Identity_3:output:0"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "AssignVariableOp_3"<br>
> + }<br>
> + }<br>
> + node_def {<br>
> + name: "Identity_4"<br>
> + op: "Identity"<br>
> + input: "RestoreV2:tensors:4"<br>
> + attr {<br>
> + key: "T"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "Identity_4"<br>
> + }<br>
> + }<br>
> + node_def {<br>
> + name: "AssignVariableOp_4"<br>
> + op: "AssignVariableOp"<br>
> + input: "assignvariableop_4_total"<br>
> + input: "Identity_4:output:0"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "AssignVariableOp_4"<br>
> + }<br>
> + }<br>
> + node_def {<br>
> + name: "Identity_5"<br>
> + op: "Identity"<br>
> + input: "RestoreV2:tensors:5"<br>
> + attr {<br>
> + key: "T"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + shape {<br>
> + unknown_rank: true<br>
> + }<br>
> + }<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "Identity_5"<br>
> + }<br>
> + }<br>
> + node_def {<br>
> + name: "AssignVariableOp_5"<br>
> + op: "AssignVariableOp"<br>
> + input: "assignvariableop_5_count"<br>
> + input: "Identity_5:output:0"<br>
> + attr {<br>
> + key: "_output_shapes"<br>
> + value {<br>
> + list {<br>
> + }<br>
> + }<br>
> + }<br>
> + attr {<br>
> + key: "dtype"<br>
> + value {<br>
> + type: DT_FLOAT<br>
> + }<br>
> + }<br>
> + experimental_debug_info {<br>
> + original_node_names: "</blockquote></div>