llvm/lib/Analysis/models/gen-inline-oz-test-model.py
1 """Generate a mock model for LLVM tests.
3 The generated model is not a neural net - it is just a tf.function with the
4 correct input and output parameters. By construction, the mock model will always
5 output 1.
6 """
8 import os
9 import importlib.util
10 import sys
12 import tensorflow as tf
14 POLICY_DECISION_LABEL = "inlining_decision"
15 POLICY_OUTPUT_SPEC = """
18 "logging_name": "inlining_decision",
19 "tensor_spec": {
20 "name": "StatefulPartitionedCall",
21 "port": 0,
22 "type": "int64_t",
23 "shape": [
29 """
# pylint: disable=g-complex-comprehension
def get_input_signature():
    """Returns the list of features for LLVM inlining."""
    # int64 features
    inputs = [
        tf.TensorSpec(dtype=tf.int64, shape=(), name=key)
        for key in [
            "caller_basic_block_count",
            "caller_conditionally_executed_blocks",
            "caller_users",
            "callee_basic_block_count",
            "callee_conditionally_executed_blocks",
            "callee_users",
            "nr_ctant_params",
            "node_count",
            "edge_count",
            "callsite_height",
            "cost_estimate",
            "inlining_default",
            "sroa_savings",
            "sroa_losses",
            "load_elimination",
            "call_penalty",
            "call_argument_setup",
            "load_relative_intrinsic",
            "lowered_call_arg_setup",
            "indirect_call_penalty",
            "jump_table_penalty",
            "case_cluster_penalty",
            "switch_penalty",
            "unsimplified_common_instructions",
            "num_loops",
            "dead_blocks",
            "simplified_instructions",
            "constant_args",
            "constant_offset_ptr_args",
            "callsite_cost",
            "cold_cc_penalty",
            "last_call_to_static_bonus",
            "is_multiple_blocks",
            "nested_inlines",
            "nested_inline_cost_estimate",
            "threshold",
        ]
    ]

    # float32 features
    inputs.extend(
        [
            tf.TensorSpec(dtype=tf.float32, shape=(), name=key)
            for key in ["discount", "reward"]
        ]
    )

    # int32 features
    inputs.extend(
        [tf.TensorSpec(dtype=tf.int32, shape=(), name=key) for key in ["step_type"]]
    )
    return inputs


def get_output_signature():
    return POLICY_DECISION_LABEL


def get_output_spec():
    return POLICY_OUTPUT_SPEC


def get_output_spec_path(path):
    return os.path.join(path, "output_spec.json")


def build_mock_model(path, signature):
    """Build and save the mock model with the given signature"""
    module = tf.Module()

    def action(*inputs):
        return {signature["output"]: tf.constant(value=1, dtype=tf.int64)}

    module.action = tf.function()(action)
    action = {"action": module.action.get_concrete_function(signature["inputs"])}
    tf.saved_model.save(module, path, signatures=action)

    output_spec_path = get_output_spec_path(path)
    with open(output_spec_path, "w") as f:
        print(f"Writing output spec to {output_spec_path}.")
        f.write(signature["output_spec"])
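# Note on build_mock_model() above: the exported "action" signature is traced
# against the TensorSpecs from get_input_signature(), so those features define
# the signature's inputs, while the body ignores them and always returns the
# constant decision 1. tf.saved_model.save() produces the usual SavedModel
# layout (saved_model.pb plus a variables/ directory) under `path`, with
# output_spec.json written alongside it; this layout description reflects
# standard TensorFlow behavior rather than anything specific to this script.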
def get_signature():
    return {
        "inputs": get_input_signature(),
        "output": get_output_signature(),
        "output_spec": get_output_spec(),
    }


def main(argv):
    assert len(argv) == 2
    model_path = argv[1]

    print(f"Output model to: [{argv[1]}]")
    signature = get_signature()
    build_mock_model(model_path, signature)


if __name__ == "__main__":
    main(sys.argv)
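# Usage sketch (illustrative; the output path below is an arbitrary example,
# not one used by the LLVM tests):
#
#   python3 gen-inline-oz-test-model.py /tmp/inline_mock_model
#
# The saved model can then be loaded back with TensorFlow to confirm the
# constant decision, e.g.:
#
#   loaded = tf.saved_model.load("/tmp/inline_mock_model")
#   action_fn = loaded.signatures["action"]
#   # Calling action_fn with any values for the declared features returns a
#   # dict whose "inlining_decision" entry is 1.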