[llvm] c1e2f73 - [llvm][NFC] expose LLVM_HAVE_TF_API through llvm-config.h
Mircea Trofin via llvm-commits
llvm-commits at lists.llvm.org
Tue Jul 14 14:09:44 PDT 2020
Author: Mircea Trofin
Date: 2020-07-14T14:09:35-07:00
New Revision: c1e2f73c392c111dc40de09daa71245f640ca9f5
URL: https://github.com/llvm/llvm-project/commit/c1e2f73c392c111dc40de09daa71245f640ca9f5
DIFF: https://github.com/llvm/llvm-project/commit/c1e2f73c392c111dc40de09daa71245f640ca9f5.diff
LOG: [llvm][NFC] expose LLVM_HAVE_TF_API through llvm-config.h
Summary:
This allows users of the LLVM library to discover whether LLVM was built
with the TensorFlow C API dependency, which helps when using the TFUtils
wrapper, for example.
We don't do the same for the LLVM_HAVE_TF_AOT flag, because that does
not expose any API.
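For illustration, a minimal sketch (hypothetical downstream code, not part of this change) of how a user of the LLVM library might consume the flag once it is exposed through llvm-config.h; it assumes the TensorFlow C API headers are also on the include path whenever the flag is set:

// Hypothetical downstream code (not from this commit), assuming an LLVM
// build configured with TENSORFLOW_C_LIB_PATH so that LLVM_HAVE_TF_API is
// set in the installed llvm-config.h.
#include "llvm/Config/llvm-config.h"

#ifdef LLVM_HAVE_TF_API
// The TF-backed TFUtils API is only usable in such builds; its header also
// requires the TensorFlow C API headers on the include path.
#include "llvm/Analysis/Utils/TFUtils.h"
#endif

// Returns true when the LLVM library this code links against was built with
// the libtensorflow (TF C API) dependency.
bool llvmHasTensorFlowCAPI() {
#ifdef LLVM_HAVE_TF_API
  return true;
#else
  return false;
#endif
}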
Reviewers: mehdi_amini, davidxl
Subscribers: mgorny, aaron.ballman, llvm-commits
Tags: #llvm
Differential Revision: https://reviews.llvm.org/D83746
Added:
Modified:
llvm/CMakeLists.txt
llvm/include/llvm/Analysis/Utils/TFUtils.h
llvm/include/llvm/Config/llvm-config.h.cmake
llvm/unittests/Analysis/InlineSizeEstimatorAnalysisTest.cpp
Removed:
################################################################################
diff --git a/llvm/CMakeLists.txt b/llvm/CMakeLists.txt
index 4e14e61fcacd..eacf8d5e5501 100644
--- a/llvm/CMakeLists.txt
+++ b/llvm/CMakeLists.txt
@@ -832,6 +832,21 @@ configure_file(
${LLVM_INCLUDE_DIR}/llvm/Config/Targets.def
)
+# For up-to-date instructions for installing the Tensorflow dependency, refer to
+# the bot setup script: https://github.com/google/ml-compiler-opt/blob/master/buildbot/buildbot_init.sh
+# In this case, the latest C API library is available for download from
+# https://www.tensorflow.org/install/lang_c.
+# We will expose the conditional compilation variable,
+# LLVM_HAVE_TF_API, through llvm-config.h, so that a user of the LLVM library may
+# also leverage the dependency.
+set(TENSORFLOW_C_LIB_PATH "" CACHE PATH "Path to TensorFlow C library install")
+find_library(tensorflow_c_api tensorflow PATHS ${TENSORFLOW_C_LIB_PATH}/lib)
+
+if (tensorflow_c_api)
+ set(LLVM_HAVE_TF_API "ON" CACHE BOOL "Full Tensorflow API available")
+ include_directories(${TENSORFLOW_C_LIB_PATH}/include)
+endif()
+
# Configure the three LLVM configuration header files.
configure_file(
${LLVM_MAIN_INCLUDE_DIR}/llvm/Config/config.h.cmake
@@ -972,27 +987,18 @@ set(TENSORFLOW_AOT_PATH "" CACHE PATH "Path to TensorFlow pip install dir")
if (NOT TENSORFLOW_AOT_PATH STREQUAL "")
set(LLVM_HAVE_TF_AOT "ON" CACHE BOOL "Tensorflow AOT available")
- set(TENSORFLOW_AOT_COMPILER
- "${TENSORFLOW_AOT_PATH}/../../../../bin/saved_model_cli"
- CACHE PATH "Path to the Tensorflow AOT compiler")
+ set(TENSORFLOW_AOT_COMPILER
+ "${TENSORFLOW_AOT_PATH}/../../../../bin/saved_model_cli"
+ CACHE PATH "Path to the Tensorflow AOT compiler")
+ # Unlike the LLVM_HAVE_TF_API case, we don't need to expose this through
+ # llvm-config.h, because it's an internal implementation detail. A user of the llvm library that wants to also
+ # use the TF AOT compiler may do so through their custom build step.
add_definitions("-DLLVM_HAVE_TF_AOT")
include_directories(${TENSORFLOW_AOT_PATH}/include)
add_subdirectory(${TENSORFLOW_AOT_PATH}/xla_aot_runtime_src
${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}/tf_runtime)
endif()
-set(TENSORFLOW_C_LIB_PATH "" CACHE PATH "Path to TensorFlow C library install")
-find_library(tensorflow_c_api tensorflow PATHS ${TENSORFLOW_C_LIB_PATH}/lib)
-
-# Similar to the above Tensorflow dependency, please refer to the same script.
-# In this case, the latest C API library is available for download from
-# https://www.tensorflow.org/install/lang_c
-if (tensorflow_c_api)
- set(LLVM_HAVE_TF_API "ON" CACHE BOOL "Full Tensorflow API available")
- add_definitions("-DLLVM_HAVE_TF_API")
- include_directories(${TENSORFLOW_C_LIB_PATH}/include)
-endif()
-
# Put this before tblgen. Else we have a circular dependence.
add_subdirectory(lib/Demangle)
add_subdirectory(lib/Support)
diff --git a/llvm/include/llvm/Analysis/Utils/TFUtils.h b/llvm/include/llvm/Analysis/Utils/TFUtils.h
index b7de199753a6..118081652e9e 100644
--- a/llvm/include/llvm/Analysis/Utils/TFUtils.h
+++ b/llvm/include/llvm/Analysis/Utils/TFUtils.h
@@ -9,6 +9,8 @@
#ifndef LLVM_ANALYSIS_UTILS_TFUTILS_H
#define LLVM_ANALYSIS_UTILS_TFUTILS_H
+#include "llvm/Config/config.h"
+
#ifdef LLVM_HAVE_TF_API
#include "tensorflow/c/c_api.h"
#include "llvm/IR/LLVMContext.h"
diff --git a/llvm/include/llvm/Config/llvm-config.h.cmake b/llvm/include/llvm/Config/llvm-config.h.cmake
index 475c93efd653..82b682ddb3dc 100644
--- a/llvm/include/llvm/Config/llvm-config.h.cmake
+++ b/llvm/include/llvm/Config/llvm-config.h.cmake
@@ -79,4 +79,7 @@
*/
#cmakedefine01 LLVM_FORCE_ENABLE_STATS
+/* Define if LLVM was built with a dependency to the libtensorflow dynamic library */
+#cmakedefine LLVM_HAVE_TF_API
+
#endif
diff --git a/llvm/unittests/Analysis/InlineSizeEstimatorAnalysisTest.cpp b/llvm/unittests/Analysis/InlineSizeEstimatorAnalysisTest.cpp
index 377590be016a..1d51ae292c88 100644
--- a/llvm/unittests/Analysis/InlineSizeEstimatorAnalysisTest.cpp
+++ b/llvm/unittests/Analysis/InlineSizeEstimatorAnalysisTest.cpp
@@ -26,7 +26,7 @@ using namespace llvm;
extern const char *TestMainArgv0;
extern cl::opt<std::string> TFIR2NativeModelPath;
-#if LLVM_HAVE_TF_API
+#ifdef LLVM_HAVE_TF_API
static std::string getModelPath() {
SmallString<128> InputsDir = unittest::getInputFileDirectory(TestMainArgv0);
llvm::sys::path::append(InputsDir, "ir2native_x86_64_model");
@@ -87,13 +87,13 @@ define internal i32 @top() {
)IR");
FunctionAnalysisManager FAM = buildFAM();
-#if LLVM_HAVE_TF_API
+#ifdef LLVM_HAVE_TF_API
TFIR2NativeModelPath = getModelPath();
#endif
InlineSizeEstimatorAnalysis FA;
auto SizeEstimate = FA.run(*M->getFunction("branches"), FAM);
-#if LLVM_HAVE_TF_API
+#ifdef LLVM_HAVE_TF_API
EXPECT_GT(*SizeEstimate, 0);
#else
EXPECT_FALSE(SizeEstimate.hasValue());
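A note on the #if -> #ifdef change in the test (illustration, not part of the diff): previously add_definitions("-DLLVM_HAVE_TF_API") made the compiler define the macro as 1, so #if worked; with #cmakedefine the generated header defines the macro with no value, so only a definedness check is reliable.

// With add_definitions("-DLLVM_HAVE_TF_API") the compiler sees -DLLVM_HAVE_TF_API=1:
//   #if LLVM_HAVE_TF_API     // expands to: #if 1  -> fine
//
// With `#cmakedefine LLVM_HAVE_TF_API` the generated header instead contains:
#define LLVM_HAVE_TF_API
//   #if LLVM_HAVE_TF_API     // expands to: #if <empty>  -> compile error
//
// A definedness check works in both cases:
#ifdef LLVM_HAVE_TF_API
// TensorFlow-backed path
#endif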