gcc/config/aarch64/tuning_models/generic_armv8_a.h
/* Tuning model description for AArch64 architecture.
   Copyright (C) 2023-2025 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */
#ifndef GCC_AARCH64_H_GENERIC_ARMV8_A
#define GCC_AARCH64_H_GENERIC_ARMV8_A

#include "generic.h"
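
/* Additional addressing-mode costs, charged on top of the base cost of
   a memory access.  The nested block gives the cost of scaled (shifted
   register) addressing per access size (16/32/64/128 bits); the other
   fields cost writeback forms, register offsets, sign/zero-extended
   register offsets and immediate offsets.  A zero means the form is
   treated as free on generic Armv8-A cores.  */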
static const struct cpu_addrcost_table generic_armv8_a_addrcost_table =
{
    {
      1, /* hi  */
      0, /* si  */
      0, /* di  */
      1, /* ti  */
    },
  0, /* pre_modify  */
  0, /* post_modify  */
  0, /* post_modify_ld3_st3  */
  0, /* post_modify_ld4_st4  */
  0, /* register_offset  */
  0, /* register_sextend  */
  0, /* register_zextend  */
  0 /* imm_offset  */
};
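
/* Register move costs, as used by the register allocator through
   TARGET_REGISTER_MOVE_COST.  Pricing GP<->FP moves above memmov_cost
   (see generic_armv8_a_tunings below) makes the allocator prefer a
   memory spill to bouncing a value through the other register file.  */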
static const struct cpu_regmove_cost generic_armv8_a_regmove_cost =
{
  1, /* GP2GP  */
  /* Avoid the use of slow int<->fp moves for spilling by setting
     their cost higher than memmov_cost.  */
  5, /* GP2FP  */
  5, /* FP2GP  */
  2 /* FP2FP  */
};
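
/* The vector cost tables below are in the abstract units of the
   vectorizer's cost model, where a plain scalar statement costs 1;
   most Advanced SIMD statements are therefore costed the same as their
   scalar counterparts, with permutes and reductions roughly twice as
   expensive.  */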
/* Generic costs for Advanced SIMD vector operations.  */
static const advsimd_vec_cost generic_armv8_a_advsimd_vector_cost =
{
  1, /* int_stmt_cost  */
  1, /* fp_stmt_cost  */
  0, /* ld2_st2_permute_cost  */
  0, /* ld3_st3_permute_cost  */
  0, /* ld4_st4_permute_cost  */
  2, /* permute_cost  */
  2, /* reduc_i8_cost  */
  2, /* reduc_i16_cost  */
  2, /* reduc_i32_cost  */
  2, /* reduc_i64_cost  */
  2, /* reduc_f16_cost  */
  2, /* reduc_f32_cost  */
  2, /* reduc_f64_cost  */
  2, /* store_elt_extra_cost  */
  2, /* vec_to_scalar_cost  */
  1, /* scalar_to_vec_cost  */
  1, /* align_load_cost  */
  1, /* unalign_load_cost  */
  1, /* unalign_store_cost  */
  1  /* store_cost  */
};
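
/* The SVE table embeds the same per-statement costs as the Advanced
   SIMD table and then adds the SVE-only operations: CLASTA/CLASTB
   extract-last, the strictly-ordered FADDA floating-point reductions,
   and gather loads / scatter stores, which are costed per element with
   a one-off initialisation cost for setting up a gather.  */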
/* Generic costs for SVE vector operations.  */
static const sve_vec_cost generic_armv8_a_sve_vector_cost =
{
  {
    1, /* int_stmt_cost  */
    1, /* fp_stmt_cost  */
    0, /* ld2_st2_permute_cost  */
    0, /* ld3_st3_permute_cost  */
    0, /* ld4_st4_permute_cost  */
    2, /* permute_cost  */
    2, /* reduc_i8_cost  */
    2, /* reduc_i16_cost  */
    2, /* reduc_i32_cost  */
    2, /* reduc_i64_cost  */
    2, /* reduc_f16_cost  */
    2, /* reduc_f32_cost  */
    2, /* reduc_f64_cost  */
    2, /* store_elt_extra_cost  */
    2, /* vec_to_scalar_cost  */
    1, /* scalar_to_vec_cost  */
    1, /* align_load_cost  */
    1, /* unalign_load_cost  */
    1, /* unalign_store_cost  */
    1  /* store_cost  */
  },
  2, /* clast_cost  */
  2, /* fadda_f16_cost  */
  2, /* fadda_f32_cost  */
  2, /* fadda_f64_cost  */
  4, /* gather_load_x32_cost  */
  2, /* gather_load_x64_cost  */
  12, /* gather_load_x32_init_cost  */
  4, /* gather_load_x64_init_cost  */
  1 /* scatter_store_elt_cost  */
};
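
/* The scalar_* fields below provide the baseline that the Advanced
   SIMD and SVE tables are measured against; issue_info is null because
   this generic model does not describe per-core issue rates to the
   vectorizer.  */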
/* Generic costs for vector insn classes.  */
static const struct cpu_vector_cost generic_armv8_a_vector_cost =
{
  1, /* scalar_int_stmt_cost  */
  1, /* scalar_fp_stmt_cost  */
  1, /* scalar_load_cost  */
  1, /* scalar_store_cost  */
  3, /* cond_taken_branch_cost  */
  1, /* cond_not_taken_branch_cost  */
  &generic_armv8_a_advsimd_vector_cost, /* advsimd  */
  &generic_armv8_a_sve_vector_cost, /* sve  */
  nullptr /* issue_info  */
};
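
/* These values feed BRANCH_COST: a low predictable cost lets the RTL
   optimizers keep branches they expect to predict well, while the
   higher unpredictable cost biases them towards branch-free sequences
   such as conditional selects.  */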
/* Generic costs for branch instructions.  */
static const struct cpu_branch_cost generic_armv8_a_branch_cost =
{
  1,  /* Predictable.  */
  3   /* Unpredictable.  */
};
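
/* AARCH64_APPROX_NONE throughout means no approximate (estimate +
   Newton-Raphson) sequences are emitted by default; users can still
   opt in with -mlow-precision-div, -mlow-precision-sqrt and
   -mlow-precision-recip-sqrt under fast-math rules.  */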
/* Generic approximation modes.  */
static const cpu_approx_modes generic_armv8_a_approx_modes =
{
  AARCH64_APPROX_NONE, /* division  */
  AARCH64_APPROX_NONE, /* sqrt  */
  AARCH64_APPROX_NONE /* recip_sqrt  */
};
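
/* -1 means "unknown, keep the compiler-wide default".  With no
   prefetch slots and no optimization level at which to enable it,
   software prefetching is effectively disabled for this model.  */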
/* Generic prefetch settings (which disable prefetch).  */
static const cpu_prefetch_tune generic_armv8_a_prefetch_tune =
{
  0,    /* num_slots  */
  -1,   /* l1_cache_size  */
  -1,   /* l1_cache_line_size  */
  -1,   /* l2_cache_size  */
  true, /* prefetch_dynamic_strides  */
  -1,   /* minimum_stride  */
  -1    /* default_opt_level  */
};
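
/* The top-level tuning description, chosen via -mtune.  memmov_cost is
   split by the kind of value moved (and interacts with the regmove
   costs above); issue_rate assumes 3 instructions per cycle; the
   alignment strings use GCC's "n:m" -falign-* syntax, so "32:16"
   requests 32-byte alignment only when it needs less than 16 bytes of
   padding.  */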
static const struct tune_params generic_armv8_a_tunings =
{
  &cortexa76_extra_costs,
  &generic_armv8_a_addrcost_table,
  &generic_armv8_a_regmove_cost,
  &generic_armv8_a_vector_cost,
  &generic_armv8_a_branch_cost,
  &generic_armv8_a_approx_modes,
  SVE_NOT_IMPLEMENTED, /* sve_width  */
  { 4, /* load_int.  */
    2, /* store_int.  */
    5, /* load_fp.  */
    2, /* store_fp.  */
    4, /* load_pred.  */
    4 /* store_pred.  */
  }, /* memmov_cost.  */
  3, /* issue_rate  */
  AARCH64_FUSE_BASE, /* fusible_ops  */
  "32:16", /* function_align.  */
  "4",     /* jump_align.  */
  "32:16", /* loop_align.  */
  2, /* int_reassoc_width.  */
  4, /* fp_reassoc_width.  */
  1, /* fma_reassoc_width.  */
  2, /* vec_reassoc_width.  */
  2, /* min_div_recip_mul_sf.  */
  2, /* min_div_recip_mul_df.  */
  0, /* max_case_values.  */
  tune_params::AUTOPREFETCHER_WEAK, /* autoprefetcher_model.  */
  (AARCH64_EXTRA_TUNE_BASE
   | AARCH64_EXTRA_TUNE_CSE_SVE_VL_CONSTANTS
   | AARCH64_EXTRA_TUNE_MATCHED_VECTOR_THROUGHPUT), /* tune_flags.  */
  &generic_prefetch_tune,
  AARCH64_LDP_STP_POLICY_ALWAYS, /* ldp_policy_model.  */
  AARCH64_LDP_STP_POLICY_ALWAYS  /* stp_policy_model.  */
};
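
/* These are the tunings selected by -mtune=generic-armv8-a.  Note that
   the prefetch field points at generic.h's generic_prefetch_tune, not
   the generic_armv8_a_prefetch_tune defined above.  */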
#endif /* GCC_AARCH64_H_GENERIC_ARMV8_A.  */