[llvm] edf8e3e - [NFC][mlgo]Make the test model generator inlining-specific
Mircea Trofin via llvm-commits
llvm-commits at lists.llvm.org
Wed Dec 22 13:44:26 PST 2021
Author: Mircea Trofin
Date: 2021-12-22T13:38:45-08:00
New Revision: edf8e3ea5ee74f7bf86651b6caa29e456e3119cd
URL: https://github.com/llvm/llvm-project/commit/edf8e3ea5ee74f7bf86651b6caa29e456e3119cd
DIFF: https://github.com/llvm/llvm-project/commit/edf8e3ea5ee74f7bf86651b6caa29e456e3119cd.diff
LOG: [NFC][mlgo]Make the test model generator inlining-specific
When looking at building the generator for regalloc, we realized we'd
need quite a bit of custom logic, and that perhaps it'd be easier to
just have each use case (each kind of mlgo policy) have its own
stand-alone test generator.
This patch just consolidates the old `config.py` and
`generate_mock_model.py` into one file, and does away with
subdirectories under Analysis/models.
Added:
llvm/lib/Analysis/models/gen-inline-oz-test-model.py
Modified:
llvm/cmake/modules/TensorFlowCompile.cmake
llvm/lib/Analysis/CMakeLists.txt
llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
llvm/test/Transforms/Inline/ML/development-training-log.ll
llvm/test/Transforms/Inline/ML/ml-test-development-mode.ll
Removed:
llvm/lib/Analysis/models/generate_mock_model.py
llvm/lib/Analysis/models/inlining/config.py
################################################################################
diff --git a/llvm/cmake/modules/TensorFlowCompile.cmake b/llvm/cmake/modules/TensorFlowCompile.cmake
index ea5fa56cbdba8..9427a26de8be6 100644
--- a/llvm/cmake/modules/TensorFlowCompile.cmake
+++ b/llvm/cmake/modules/TensorFlowCompile.cmake
@@ -27,15 +27,13 @@ function(tf_get_model model final_path)
endfunction()
# Generate a mock model for tests.
-function(generate_mock_model model generate_mock_model_py config)
- tf_get_absolute_path(${model} ${CMAKE_CURRENT_BINARY_DIR} LLVM_ML_MODELS_ABSOLUTE)
- tf_get_absolute_path(${generate_mock_model_py} ${CMAKE_CURRENT_SOURCE_DIR} GENERATED_MODEL_ABSOLUTE_PATH)
- tf_get_absolute_path(${config} ${CMAKE_CURRENT_SOURCE_DIR} LLVM_ML_MODEL_CONFIG_ABSOLUTE)
+function(generate_mock_model generator output)
+ tf_get_absolute_path(${generator} ${CMAKE_CURRENT_SOURCE_DIR} generator_absolute_path)
+ tf_get_absolute_path(${output} ${CMAKE_CURRENT_BINARY_DIR} output_absolute_path)
message(WARNING "Autogenerated mock models should not be used in production builds.")
execute_process(COMMAND python3
- ${GENERATED_MODEL_ABSOLUTE_PATH}
- ${LLVM_ML_MODEL_CONFIG_ABSOLUTE}
- ${LLVM_ML_MODELS_ABSOLUTE}-autogenerated
+ ${generator_absolute_path}
+ ${output_absolute_path}
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
)
endfunction()
@@ -86,7 +84,7 @@ function(tfcompile model tag_set signature_def_key fname cpp_class)
endfunction()
-function(tf_find_and_compile model default_url default_path generation_config tag_set signature_def_key fname cpp_class)
+function(tf_find_and_compile model default_url default_path test_model_generator tag_set signature_def_key fname cpp_class)
if ("${model}" STREQUAL "download")
# Crash if the user wants to download a model but a URL is set to "TO_BE_UPDATED"
if ("${default_url}" STREQUAL "TO_BE_UPDATED")
@@ -97,8 +95,8 @@ function(tf_find_and_compile model default_url default_path generation_config ta
endif()
if ("${model}" STREQUAL "autogenerate")
- generate_mock_model(${default_path} models/generate_mock_model.py ${generation_config})
- set(model ${default_path}-autogenerated)
+ set(model ${default_path}-autogenerated)
+ generate_mock_model(${test_model_generator} ${model})
endif()
tf_get_model(${model} LLVM_ML_MODELS_ABSOLUTE)
diff --git a/llvm/lib/Analysis/CMakeLists.txt b/llvm/lib/Analysis/CMakeLists.txt
index ae0ee22561ced..aec84124129f4 100644
--- a/llvm/lib/Analysis/CMakeLists.txt
+++ b/llvm/lib/Analysis/CMakeLists.txt
@@ -12,7 +12,7 @@ if (DEFINED LLVM_HAVE_TF_AOT OR DEFINED LLVM_HAVE_TF_API)
${LLVM_INLINER_MODEL_PATH}
${LLVM_INLINER_MODEL_CURRENT_URL}
${LLVM_INLINER_MODEL_PATH_DEFAULT}
- "models/inlining/config.py"
+ "models/gen-inline-oz-test-model.py"
serve
action
InlinerSizeModel
diff --git a/llvm/lib/Analysis/models/inlining/config.py b/llvm/lib/Analysis/models/gen-inline-oz-test-model.py
similarity index 58%
rename from llvm/lib/Analysis/models/inlining/config.py
rename to llvm/lib/Analysis/models/gen-inline-oz-test-model.py
index 78d3a8259cc29..d800b1476eede 100644
--- a/llvm/lib/Analysis/models/inlining/config.py
+++ b/llvm/lib/Analysis/models/gen-inline-oz-test-model.py
@@ -1,4 +1,13 @@
-"""Inlining Training config."""
+"""Generate a mock model for LLVM tests.
+
+The generated model is not a neural net - it is just a tf.function with the
+correct input and output parameters. By construction, the mock model will always
+output 1.
+"""
+
+import os
+import importlib.util
+import sys
import tensorflow as tf
@@ -85,3 +94,49 @@ def get_output_signature():
def get_output_spec():
return POLICY_OUTPUT_SPEC
+
+def get_output_spec_path(path):
+ return os.path.join(path, 'output_spec.json')
+
+
+def build_mock_model(path, signature):
+ """Build and save the mock model with the given signature"""
+ module = tf.Module()
+
+ # We have to set this useless variable in order for the TF C API to correctly
+ # intake it
+ module.var = tf.Variable(0.)
+
+ def action(*inputs):
+ s = tf.reduce_sum([tf.cast(x, tf.float32) for x in tf.nest.flatten(inputs)])
+ return {signature['output']: float('inf') + s + module.var}
+
+ module.action = tf.function()(action)
+ action = {'action': module.action.get_concrete_function(signature['inputs'])}
+ tf.saved_model.save(module, path, signatures=action)
+
+ output_spec_path = get_output_spec_path(path)
+ with open(output_spec_path, 'w') as f:
+ print(f'Writing output spec to {output_spec_path}.')
+ f.write(signature['output_spec'])
+
+
+def get_signature():
+ return {
+ 'inputs': get_input_signature(),
+ 'output': get_output_signature(),
+ 'output_spec': get_output_spec()
+ }
+
+
+def main(argv):
+ assert len(argv) == 2
+ model_path = argv[1]
+
+ print(f'Output model to: [{argv[1]}]')
+ signature = get_signature()
+ build_mock_model(model_path, signature)
+
+
+if __name__ == '__main__':
+ main(sys.argv)
diff --git a/llvm/lib/Analysis/models/generate_mock_model.py b/llvm/lib/Analysis/models/generate_mock_model.py
deleted file mode 100644
index f1170abba3836..0000000000000
--- a/llvm/lib/Analysis/models/generate_mock_model.py
+++ /dev/null
@@ -1,69 +0,0 @@
-"""Generate a mock model for LLVM tests.
-
-The generated model is not a neural net - it is just a tf.function with the
-correct input and output parameters. By construction, the mock model will always
-output 1.
-"""
-
-import os
-import importlib.util
-import sys
-
-import tensorflow as tf
-
-
-def get_output_spec_path(path):
- return os.path.join(path, 'output_spec.json')
-
-
-def build_mock_model(path, signature):
- """Build and save the mock model with the given signature"""
- module = tf.Module()
-
- # We have to set this useless variable in order for the TF C API to correctly
- # intake it
- module.var = tf.Variable(0.)
-
- def action(*inputs):
- s = tf.reduce_sum([tf.cast(x, tf.float32) for x in tf.nest.flatten(inputs)])
- return {signature['output']: float('inf') + s + module.var}
-
- module.action = tf.function()(action)
- action = {'action': module.action.get_concrete_function(signature['inputs'])}
- tf.saved_model.save(module, path, signatures=action)
-
- output_spec_path = get_output_spec_path(path)
- with open(output_spec_path, 'w') as f:
- print(f'Writing output spec to {output_spec_path}.')
- f.write(signature['output_spec'])
-
-
-def get_external_signature(config_path):
- """Get the signature for the desired model.
-
- We manually import the python file at config_path to avoid adding a gin
- dependency to the LLVM build.
- """
- spec = importlib.util.spec_from_file_location('config', config_path)
- config = importlib.util.module_from_spec(spec)
- spec.loader.exec_module(config)
-
- return {
- 'inputs': config.get_input_signature(),
- 'output': config.get_output_signature(),
- 'output_spec': config.get_output_spec()
- }
-
-
-def main(argv):
- assert len(argv) == 3
- config_path = argv[1]
- model_path = argv[2]
-
- print(f'Using config file at [{argv[1]}]')
- signature = get_external_signature(config_path)
- build_mock_model(model_path, signature)
-
-
-if __name__ == '__main__':
- main(sys.argv)
diff --git a/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll b/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
index a410a2a071aa0..48c2e0a301a94 100644
--- a/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
+++ b/llvm/test/Transforms/Inline/ML/bounds-checks-rewards.ll
@@ -8,7 +8,7 @@
;
; Generate mock model
; RUN: rm -rf %t
-; RUN: %python %S/../../../../lib/Analysis/models/generate_mock_model.py %S/../../../../lib/Analysis/models/inlining/config.py %t
+; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t
;
; When the bounds are very wide ("no bounds"), all inlinings happen.
; RUN: opt -passes=scc-oz-module-inliner -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-model-under-training=%t -training-log=- -tfutils-text-log -enable-ml-inliner=development -ml-advisor-size-increase-threshold=10.0 -S < %s 2>&1 | FileCheck %s --check-prefix=CHECK --check-prefix=NOBOUNDS
diff --git a/llvm/test/Transforms/Inline/ML/development-training-log.ll b/llvm/test/Transforms/Inline/ML/development-training-log.ll
index c571c02b3fd51..7d3b592075828 100644
--- a/llvm/test/Transforms/Inline/ML/development-training-log.ll
+++ b/llvm/test/Transforms/Inline/ML/development-training-log.ll
@@ -2,7 +2,7 @@
; REQUIRES: have_tf_api
; Generate mock model
; RUN: rm -rf %t
-; RUN: %python %S/../../../../lib/Analysis/models/generate_mock_model.py %S/../../../../lib/Analysis/models/inlining/config.py %t
+; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t
;
; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -tfutils-text-log -ml-inliner-model-under-training=%t -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -S < %s | FileCheck %s
; RUN: opt -enable-ml-inliner=development -passes=scc-oz-module-inliner -training-log=- -tfutils-text-log -ml-inliner-model-under-training=%t -ml-inliner-ir2native-model=%S/../../../../unittests/Analysis/Inputs/ir2native_x86_64_model -ml-inliner-output-spec-override=%S/Inputs/test_output_spec.json -S < %s | FileCheck %s --check-prefixes=EXTRA-OUTPUTS,CHECK
diff --git a/llvm/test/Transforms/Inline/ML/ml-test-development-mode.ll b/llvm/test/Transforms/Inline/ML/ml-test-development-mode.ll
index b755cf015a0d1..d902b9e4c778f 100644
--- a/llvm/test/Transforms/Inline/ML/ml-test-development-mode.ll
+++ b/llvm/test/Transforms/Inline/ML/ml-test-development-mode.ll
@@ -7,6 +7,6 @@
;
; REQUIRES: have_tf_api
; RUN: rm -rf %t && mkdir %t
-; RUN: %python %S/../../../../lib/Analysis/models/generate_mock_model.py %S/../../../../lib/Analysis/models/inlining/config.py %t
+; RUN: %python %S/../../../../lib/Analysis/models/gen-inline-oz-test-model.py %t
; RUN: opt -passes=scc-oz-module-inliner -enable-ml-inliner=default -S < %S/Inputs/test-module.ll 2>&1 | FileCheck %S/Inputs/test-module.ll --check-prefix=DEFAULT
; RUN: opt -passes=scc-oz-module-inliner -enable-ml-inliner=development -ml-inliner-model-under-training=%t -S < %S/Inputs/test-module.ll 2>&1 | FileCheck %S/Inputs/test-module.ll --check-prefix=CHECK
More information about the llvm-commits
mailing list