/* Tuning model description for AArch64 architecture.
   Copyright (C) 2009-2025 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#ifndef GCC_AARCH64_H_GENERIC
#define GCC_AARCH64_H_GENERIC
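
/* Generic addressing costs.  Every entry is zero, so no addressing mode
   is penalised relative to any other for this tuning.  */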
static const struct cpu_addrcost_table generic_addrcost_table =
{
    {
      0, /* hi  */
      0, /* si  */
      0, /* di  */
      0, /* ti  */
    },
  0, /* pre_modify  */
  0, /* post_modify  */
  0, /* post_modify_ld3_st3  */
  0, /* post_modify_ld4_st4  */
  0, /* register_offset  */
  0, /* register_sextend  */
  0, /* register_zextend  */
  0 /* imm_offset  */
};

static const struct cpu_regmove_cost generic_regmove_cost =
{
  1, /* GP2GP  */
  /* Avoid the use of slow int<->fp moves for spilling by setting
     their cost higher than memmov_cost.  */
  5, /* GP2FP  */
  5, /* FP2GP  */
  2 /* FP2FP  */
};
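
/* Note: with the GP2FP/FP2GP cost of 5 above and the per-access
   memmov_cost of 4 in generic_tunings below, the register allocator sees
   an int<->fp move as dearer than a memory access, so integer values are
   spilled to the stack rather than parked in FP registers.  */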

/* Generic costs for Advanced SIMD vector operations.  */
static const advsimd_vec_cost generic_advsimd_vector_cost =
{
  1, /* int_stmt_cost  */
  1, /* fp_stmt_cost  */
  0, /* ld2_st2_permute_cost  */
  0, /* ld3_st3_permute_cost  */
  0, /* ld4_st4_permute_cost  */
  2, /* permute_cost  */
  2, /* reduc_i8_cost  */
  2, /* reduc_i16_cost  */
  2, /* reduc_i32_cost  */
  2, /* reduc_i64_cost  */
  2, /* reduc_f16_cost  */
  2, /* reduc_f32_cost  */
  2, /* reduc_f64_cost  */
  2, /* store_elt_extra_cost  */
  2, /* vec_to_scalar_cost  */
  1, /* scalar_to_vec_cost  */
  1, /* align_load_cost  */
  1, /* unalign_load_cost  */
  1, /* unalign_store_cost  */
  1  /* store_cost  */
};
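
/* These entries are abstract relative units consumed by the vectorizer's
   cost model, not cycle counts: statement and load/store costs of 1 match
   the scalar_*_stmt_cost values in generic_vector_cost below, so Advanced
   SIMD statements are costed on a par with their scalar equivalents.  */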

/* Generic costs for SVE vector operations.  */
static const sve_vec_cost generic_sve_vector_cost =
{
  {
    1, /* int_stmt_cost  */
    1, /* fp_stmt_cost  */
    0, /* ld2_st2_permute_cost  */
    0, /* ld3_st3_permute_cost  */
    0, /* ld4_st4_permute_cost  */
    2, /* permute_cost  */
    2, /* reduc_i8_cost  */
    2, /* reduc_i16_cost  */
    2, /* reduc_i32_cost  */
    2, /* reduc_i64_cost  */
    2, /* reduc_f16_cost  */
    2, /* reduc_f32_cost  */
    2, /* reduc_f64_cost  */
    2, /* store_elt_extra_cost  */
    2, /* vec_to_scalar_cost  */
    1, /* scalar_to_vec_cost  */
    1, /* align_load_cost  */
    1, /* unalign_load_cost  */
    1, /* unalign_store_cost  */
    1  /* store_cost  */
  },
  2, /* clast_cost  */
  2, /* fadda_f16_cost  */
  2, /* fadda_f32_cost  */
  2, /* fadda_f64_cost  */
  4, /* gather_load_x32_cost  */
  2, /* gather_load_x64_cost  */
  0, /* gather_load_x32_init_cost  */
  0, /* gather_load_x64_init_cost  */
  1 /* scatter_store_elt_cost  */
};
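
/* A gather with 32-bit elements is costed at twice the 64-bit rate,
   reflecting that a full vector holds twice as many 32-bit elements to
   load; the zero init costs charge no extra setup penalty.  */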

/* Generic costs for vector insn classes.  */
static const struct cpu_vector_cost generic_vector_cost =
{
  1, /* scalar_int_stmt_cost  */
  1, /* scalar_fp_stmt_cost  */
  1, /* scalar_load_cost  */
  1, /* scalar_store_cost  */
  3, /* cond_taken_branch_cost  */
  1, /* cond_not_taken_branch_cost  */
  &generic_advsimd_vector_cost, /* advsimd  */
  &generic_sve_vector_cost, /* sve  */
  nullptr /* issue_info  */
};
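
/* A null issue_info opts out of the more detailed issue-rate modelling;
   CPU-specific tunings (e.g. the Neoverse ones) instead point this at an
   aarch64_vec_issue_info describing per-cycle issue limits.  */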

/* Generic costs for branch instructions.  */
static const struct cpu_branch_cost generic_branch_cost =
{
  1,  /* Predictable.  */
  3   /* Unpredictable.  */
};

/* Generic approximation modes.  */
static const cpu_approx_modes generic_approx_modes =
{
  AARCH64_APPROX_NONE,  /* division  */
  AARCH64_APPROX_NONE,  /* sqrt  */
  AARCH64_APPROX_NONE   /* recip_sqrt  */
};
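
/* AARCH64_APPROX_NONE means the Newton-Raphson approximation sequences
   are never used by default under this tuning; they can still be
   requested explicitly via -mlow-precision-div, -mlow-precision-sqrt and
   -mlow-precision-recip-sqrt.  */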

/* Generic prefetch settings (which disable prefetch).  */
static const cpu_prefetch_tune generic_prefetch_tune =
{
  0,    /* num_slots  */
  -1,   /* l1_cache_size  */
  -1,   /* l1_cache_line_size  */
  -1,   /* l2_cache_size  */
  true, /* prefetch_dynamic_strides */
  -1,   /* minimum_stride */
  -1    /* default_opt_level  */
};
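
/* A value of -1 leaves the corresponding --param at its compiler default
   rather than overriding it, and num_slots of 0 keeps software
   prefetching disabled.  */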

static const struct tune_params generic_tunings =
{
  &cortexa57_extra_costs,
  &generic_addrcost_table,
  &generic_regmove_cost,
  &generic_vector_cost,
  &generic_branch_cost,
  &generic_approx_modes,
  SVE_NOT_IMPLEMENTED, /* sve_width  */
  { 4, /* load_int.  */
    4, /* store_int.  */
    4, /* load_fp.  */
    4, /* store_fp.  */
    4, /* load_pred.  */
    4 /* store_pred.  */
  }, /* memmov_cost.  */
  2, /* issue_rate  */
  AARCH64_FUSE_BASE, /* fusible_ops  */
  "16:12",  /* function_align.  */
  "4",      /* jump_align.  */
  "8",      /* loop_align.  */
  2,        /* int_reassoc_width.  */
  4,        /* fp_reassoc_width.  */
  1,        /* fma_reassoc_width.  */
  1,        /* vec_reassoc_width.  */
  2,        /* min_div_recip_mul_sf.  */
  2,        /* min_div_recip_mul_df.  */
  0,        /* max_case_values.  */
  tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model.  */
  /* Enabling AARCH64_EXTRA_TUNE_CSE_SVE_VL_CONSTANTS significantly benefits
     Neoverse V1.  It does not have a noticeable effect on A64FX and should
     have at most a very minor effect on SVE2 cores.  */
  (AARCH64_EXTRA_TUNE_CSE_SVE_VL_CONSTANTS), /* tune_flags.  */
  &generic_prefetch_tune,
  AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model.  */
  AARCH64_LDP_STP_POLICY_ALWAYS  /* stp_policy_model.  */
};
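
/* generic_tunings is the model selected by -mtune=generic, serving as a
   baseline that later, CPU-specific tuning files refine.  */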

#endif /* GCC_AARCH64_H_GENERIC.  */