[llvm] f34ef24 - [mlgo] Skip AOT-compiling a model if a header/object pair is provided
Mircea Trofin via llvm-commits
llvm-commits at lists.llvm.org
Tue Apr 13 09:46:42 PDT 2021
Author: Mircea Trofin
Date: 2021-04-13T09:46:29-07:00
New Revision: f34ef248d37456b9978d88eee4b349076e2798a1
URL: https://github.com/llvm/llvm-project/commit/f34ef248d37456b9978d88eee4b349076e2798a1
DIFF: https://github.com/llvm/llvm-project/commit/f34ef248d37456b9978d88eee4b349076e2798a1.diff
LOG: [mlgo] Skip AOT-compiling a model if a header/object pair is provided
This allows one to cross-compile the header/object for a model in a
setup where the compiler is built on a system that cannot host the AOT
compiler. For example, if an arm-hostable clang is desired, the AOT
Tensorflow compiler can cross-compile to arm, but it cannot currently
run on arm.
The only alternative in that scenario would be to cross-compile clang
itself, but that gets complicated when trying to run tests afterwards.
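As a sketch of how this can be used (the model name "SomeModel" and the
paths are hypothetical; the variable suffix is the ${fname} argument of
tfcompile upper-cased, per the CMake change below), one would pre-build
the header/object pair elsewhere and point the LLVM configure step at it:

  cmake -G Ninja ../llvm \
    -DLLVM_OVERRIDE_MODEL_HEADER_SOMEMODEL=/path/to/SomeModel.h \
    -DLLVM_OVERRIDE_MODEL_OBJECT_SOMEMODEL=/path/to/SomeModel.o

If both files exist, they are copied into the build tree in place of
running the AOT compiler.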
Differential Revision: https://reviews.llvm.org/D99992
Added:
Modified:
llvm/cmake/modules/TensorFlowCompile.cmake
Removed:
################################################################################
diff --git a/llvm/cmake/modules/TensorFlowCompile.cmake b/llvm/cmake/modules/TensorFlowCompile.cmake
index cbb450e6bf798..c4fae4f021807 100644
--- a/llvm/cmake/modules/TensorFlowCompile.cmake
+++ b/llvm/cmake/modules/TensorFlowCompile.cmake
@@ -9,27 +9,38 @@ function(tfgetmodel model final_path)
endif()
endfunction()
-# Run the tensorflow compiler (saved_model_cli) on the saved model in the
+# Run the tensorflow compiler (saved_model_cli) on the saved model in the
# ${model} directory, looking for the ${tag_set} tag set, and the SignatureDef
# ${signature_def_key}.
-# Produce a pair of files called ${fname}.h and ${fname}.o in the
+# Produce a pair of files called ${fname}.h and ${fname}.o in the
# ${CMAKE_CURRENT_BINARY_DIR}. The generated header will define a C++ class
# called ${cpp_class} - which may be a namespace-qualified class name.
function(tfcompile model tag_set signature_def_key fname cpp_class)
- tfgetmodel(${model} LLVM_ML_MODELS_ABSOLUTE)
- message("Using model at " ${LLVM_ML_MODELS_ABSOLUTE})
set(prefix ${CMAKE_CURRENT_BINARY_DIR}/${fname})
set(obj_file ${prefix}.o)
set(hdr_file ${prefix}.h)
- add_custom_command(OUTPUT ${obj_file} ${hdr_file}
- COMMAND "XLA_FLAGS=\"--xla_cpu_multi_thread_eigen=false\"" ${TENSORFLOW_AOT_COMPILER} aot_compile_cpu
- --dir ${LLVM_ML_MODELS_ABSOLUTE}
- --tag_set ${tag_set}
- --signature_def_key ${signature_def_key}
- --output_prefix ${prefix}
- --cpp_class ${cpp_class}
- --target_triple ${LLVM_HOST_TRIPLE}
- )
+ string(TOUPPER ${fname} fname_allcaps)
+ set(override_header ${LLVM_OVERRIDE_MODEL_HEADER_${fname_allcaps}})
+ set(override_object ${LLVM_OVERRIDE_MODEL_OBJECT_${fname_allcaps}})
+ if (EXISTS "${override_header}" AND EXISTS "${override_object}")
+ configure_file(${override_header} ${hdr_file} COPYONLY)
+ configure_file(${override_object} ${obj_file} COPYONLY)
+ message("Using provided header "
+ ${hdr_file} " and object " ${obj_file}
+ " files for model " ${model})
+ else()
+ tfgetmodel(${model} LLVM_ML_MODELS_ABSOLUTE)
+ message("Using model at " ${LLVM_ML_MODELS_ABSOLUTE})
+ add_custom_command(OUTPUT ${obj_file} ${hdr_file}
+ COMMAND "XLA_FLAGS=\"--xla_cpu_multi_thread_eigen=false\"" ${TENSORFLOW_AOT_COMPILER} aot_compile_cpu
+ --dir ${LLVM_ML_MODELS_ABSOLUTE}
+ --tag_set ${tag_set}
+ --signature_def_key ${signature_def_key}
+ --output_prefix ${prefix}
+ --cpp_class ${cpp_class}
+ --target_triple ${LLVM_HOST_TRIPLE}
+ )
+ endif()
   # Aggregate the objects so that results of different tfcompile calls may be
   # grouped into one target.
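For completeness, a sketch of producing the header/object pair on a
machine that can run the AOT compiler, mirroring the command in the
custom command above (the model directory, tag set, signature key,
class name, and target triple are placeholders):

  XLA_FLAGS="--xla_cpu_multi_thread_eigen=false" \
    saved_model_cli aot_compile_cpu \
      --dir /path/to/saved_model \
      --tag_set serve \
      --signature_def_key action \
      --output_prefix SomeModel \
      --cpp_class llvm::SomeModel \
      --target_triple aarch64-unknown-linux-gnu

The resulting SomeModel.h and SomeModel.o are the files the
LLVM_OVERRIDE_MODEL_HEADER_*/LLVM_OVERRIDE_MODEL_OBJECT_* variables
should point at.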