/* Machine description for AArch64 architecture.
   Copyright (C) 2009-2025 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#ifndef GCC_AARCH64_H
#define GCC_AARCH64_H

#define aarch64_get_asm_isa_flags(opts) \
  (aarch64_feature_flags ((opts)->x_aarch64_asm_isa_flags_0, \
                          (opts)->x_aarch64_asm_isa_flags_1))
#define aarch64_get_isa_flags(opts) \
  (aarch64_feature_flags ((opts)->x_aarch64_isa_flags_0, \
                          (opts)->x_aarch64_isa_flags_1))

/* Make these flags read-only so that all uses go via
   aarch64_set_asm_isa_flags.  */
#ifdef GENERATOR_FILE
#undef aarch64_asm_isa_flags
#define aarch64_asm_isa_flags (aarch64_feature_flags (aarch64_asm_isa_flags_0,\
                                                      aarch64_asm_isa_flags_1))
#undef aarch64_isa_flags
#define aarch64_isa_flags (aarch64_feature_flags (aarch64_isa_flags_0, \
                                                  aarch64_isa_flags_1))
#else
#undef aarch64_asm_isa_flags
#define aarch64_asm_isa_flags (aarch64_get_asm_isa_flags (&global_options))
#undef aarch64_isa_flags
#define aarch64_isa_flags (aarch64_get_isa_flags (&global_options))
#endif

/* Target CPU builtins.  */
#define TARGET_CPU_CPP_BUILTINS() \
  aarch64_cpu_cpp_builtins (pfile)

#define REGISTER_TARGET_PRAGMAS() aarch64_register_pragmas ()

/* Target machine storage layout.  */
#define PROMOTE_MODE(MODE, UNSIGNEDP, TYPE) \
  if (GET_MODE_CLASS (MODE) == MODE_INT \
      && GET_MODE_SIZE (MODE) < 4) \
    { \
      if (MODE == QImode || MODE == HImode) \
        { \
          MODE = SImode; \
        } \
    }
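
/* Illustrative example (not part of the build): with the promotion above,
   a narrow integer computation such as

     short add1 (short x) { return x + 1; }

   is carried out in SImode, i.e. as a 32-bit add on a W register, rather
   than as a notional 8- or 16-bit operation.  */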

/* Bits are always numbered from the LSBit.  */
#define BITS_BIG_ENDIAN 0

/* Big/little-endian flavour.  */
#define BYTES_BIG_ENDIAN (TARGET_BIG_END != 0)
#define WORDS_BIG_ENDIAN (BYTES_BIG_ENDIAN)

#define UNITS_PER_WORD 8

#define UNITS_PER_VREG 16

#define PARM_BOUNDARY 64

#define STACK_BOUNDARY 128

#define FUNCTION_BOUNDARY 32

#define EMPTY_FIELD_BOUNDARY 32

#define BIGGEST_ALIGNMENT 128

#define SHORT_TYPE_SIZE 16

#define INT_TYPE_SIZE 32

#define LONG_TYPE_SIZE (TARGET_ILP32 ? 32 : 64)

#define POINTER_SIZE (TARGET_ILP32 ? 32 : 64)

#define LONG_LONG_TYPE_SIZE 64

#define WIDEST_HARDWARE_FP_SIZE 64

/* This value is the number of bytes a caller is allowed to drop the stack
   before probing has to be done for stack clash protection.  */
#define STACK_CLASH_CALLER_GUARD 1024

/* This value represents the minimum number of bytes we expect the function's
   outgoing arguments to be when stack-clash is enabled.  */
#define STACK_CLASH_MIN_BYTES_OUTGOING_ARGS 8

/* This value controls how many pages we manually unroll the loop for when
   generating stack clash probes.  */
#define STACK_CLASH_MAX_UNROLL_PAGES 4

/* The architecture reserves all bits of the address for hardware use,
   so the vbit must go into the delta field of pointers to member
   functions.  This is the same config as that in the AArch32
   port.  */
#define TARGET_PTRMEMFUNC_VBIT_LOCATION ptrmemfunc_vbit_in_delta

/* Emit calls to libgcc helpers for atomic operations for runtime detection
   of LSE instructions.  */
#define TARGET_OUTLINE_ATOMICS (aarch64_flag_outline_atomics)

/* Align global data as an optimization.  */
#define DATA_ALIGNMENT(EXP, ALIGN) aarch64_data_alignment (EXP, ALIGN)

/* Align stack data as an optimization.  */
#define LOCAL_ALIGNMENT(EXP, ALIGN) aarch64_stack_alignment (EXP, ALIGN)

#define STRUCTURE_SIZE_BOUNDARY 8

/* Heap alignment (same as BIGGEST_ALIGNMENT and STACK_BOUNDARY).  */
#define MALLOC_ABI_ALIGNMENT 128

/* Defined by the ABI.  */
#define WCHAR_TYPE "unsigned int"
#define WCHAR_TYPE_SIZE 32

/* Using long long breaks -ansi and -std=c90, so these will need to be
   made conditional for an LLP64 ABI.  */
#define SIZE_TYPE "long unsigned int"

#define PTRDIFF_TYPE "long int"

#define PCC_BITFIELD_TYPE_MATTERS 1

/* Use the same RTL truth representation for vector elements as we do
   for scalars.  This maintains the property that a comparison like
   eq:V4SI is a composition of 4 individual eq:SIs, just like plus:V4SI
   is a composition of 4 individual plus:SIs.

   This means that Advanced SIMD comparisons are represented in RTL as
   (neg (op ...)).  */
#define VECTOR_STORE_FLAG_VALUE(MODE) CONST1_RTX (GET_MODE_INNER (MODE))
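
/* For example, an Advanced SIMD CMEQ sets each lane to all-ones on
   equality, so it is represented as (neg:V4SI (eq:V4SI x y)): the RTL
   eq yields 1 in each lane and the negation turns that into the
   all-ones lane value -1.  */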

#ifndef USED_FOR_TARGET

/* Define an enum of all features (ISA modes, architectures and extensions).
   The ISA modes must come first.  */
enum class aarch64_feature : unsigned char {
#define DEF_AARCH64_ISA_MODE(IDENT) IDENT,
#define AARCH64_OPT_EXTENSION(A, IDENT, C, D, E, F) IDENT,
#define AARCH64_ARCH(A, B, IDENT, D, E) IDENT,
#include "aarch64-isa-modes.def"
#include "aarch64-option-extensions.def"
#include "aarch64-arches.def"
};

/* Define unique flags for each of the above.  */
#define HANDLE(IDENT) \
  constexpr auto AARCH64_FL_##IDENT ATTRIBUTE_UNUSED \
    = aarch64_feature_flags::from_index (int (aarch64_feature::IDENT));
#define DEF_AARCH64_ISA_MODE(IDENT) HANDLE (IDENT)
#define AARCH64_OPT_EXTENSION(A, IDENT, C, D, E, F) HANDLE (IDENT)
#define AARCH64_ARCH(A, B, IDENT, D, E) HANDLE (IDENT)
#include "aarch64-isa-modes.def"
#include "aarch64-option-extensions.def"
#include "aarch64-arches.def"
#undef HANDLE
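
/* Illustrative expansion (assuming an extension named SVE in
   aarch64-option-extensions.def): the HANDLE machinery above expands to

     constexpr auto AARCH64_FL_SVE
       = aarch64_feature_flags::from_index (int (aarch64_feature::SVE));

   i.e. each feature gets a single-bit mask whose bit position is its
   index in the aarch64_feature enum.  */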

/* Define aarch64_isa_mode masks.  */
#define DEF_AARCH64_ISA_MODE(IDENT) \
  constexpr auto AARCH64_ISA_MODE_##IDENT ATTRIBUTE_UNUSED \
    = aarch64_isa_mode (1) << int (aarch64_feature::IDENT);
#include "aarch64-isa-modes.def"
#undef HANDLE

constexpr auto AARCH64_FL_SM_STATE ATTRIBUTE_UNUSED
  = AARCH64_FL_SM_ON | AARCH64_FL_SM_OFF;
constexpr auto AARCH64_ISA_MODE_SM_STATE ATTRIBUTE_UNUSED
  = AARCH64_ISA_MODE_SM_ON | AARCH64_ISA_MODE_SM_OFF;

/* The mask of all ISA modes.  */
constexpr auto AARCH64_FL_ISA_MODES
  = aarch64_feature_flags ((1 << AARCH64_NUM_ISA_MODES) - 1);

/* The default ISA mode, for functions with no attributes that specify
   something to the contrary.  */
constexpr auto AARCH64_DEFAULT_ISA_MODE ATTRIBUTE_UNUSED
  = AARCH64_ISA_MODE_SM_OFF;
constexpr auto AARCH64_FL_DEFAULT_ISA_MODE ATTRIBUTE_UNUSED
  = aarch64_feature_flags (AARCH64_DEFAULT_ISA_MODE);

#endif

/* Macros to test ISA flags.

   There is intentionally no macro for AARCH64_FL_CRYPTO, since this flag bit
   is not always set when its constituent features are present.
   Check (TARGET_AES && TARGET_SHA2) instead.  */

#define AARCH64_HAVE_ISA(X) (bool (aarch64_isa_flags & AARCH64_FL_##X))

#define AARCH64_ISA_MODE ((aarch64_isa_flags & AARCH64_FL_ISA_MODES).val[0])

/* The current function is a normal non-streaming function.  */
#define TARGET_NON_STREAMING AARCH64_HAVE_ISA (SM_OFF)

/* The current function has a streaming body.  */
#define TARGET_STREAMING AARCH64_HAVE_ISA (SM_ON)

/* The current function has a streaming-compatible body.  */
#define TARGET_STREAMING_COMPATIBLE \
  ((aarch64_isa_flags & AARCH64_FL_SM_STATE) == 0)

/* PSTATE.ZA is enabled in the current function body.  */
#define TARGET_ZA AARCH64_HAVE_ISA (ZA_ON)

/* AdvSIMD is supported in the default configuration, unless disabled by
   -mgeneral-regs-only or by the +nosimd extension.  The set of available
   instructions is then subdivided into:

   - the "base" set, available both in SME streaming mode and in
     non-streaming mode

   - the full set, available only in non-streaming mode.  */
#define TARGET_BASE_SIMD AARCH64_HAVE_ISA (SIMD)
#define TARGET_SIMD (TARGET_BASE_SIMD && TARGET_NON_STREAMING)
#define TARGET_FLOAT AARCH64_HAVE_ISA (FP)

/* AARCH64_FL options necessary for system register implementation.  */

/* Define AARCH64_FL aliases for architectural features which are protected
   by -march flags in binutils but which receive no special treatment by GCC.

   Such flags are inherited from the Binutils definition of system registers
   and are mapped to the architecture in which the feature is implemented.  */
#define AARCH64_FL_RAS        AARCH64_FL_V8A
#define AARCH64_FL_LOR        AARCH64_FL_V8_1A
#define AARCH64_FL_PAN        AARCH64_FL_V8_1A
#define AARCH64_FL_AMU        AARCH64_FL_V8_4A
#define AARCH64_FL_SCXTNUM    AARCH64_FL_V8_5A
#define AARCH64_FL_ID_PFR2    AARCH64_FL_V8_5A

/* Armv8.9-A extension feature bits defined in Binutils but absent from GCC,
   aliased to their base architecture.  */
#define AARCH64_FL_AIE           AARCH64_FL_V8_9A
#define AARCH64_FL_DEBUGv8p9     AARCH64_FL_V8_9A
#define AARCH64_FL_FGT2          AARCH64_FL_V8_9A
#define AARCH64_FL_ITE           AARCH64_FL_V8_9A
#define AARCH64_FL_PFAR          AARCH64_FL_V8_9A
#define AARCH64_FL_PMUv3_ICNTR   AARCH64_FL_V8_9A
#define AARCH64_FL_PMUv3_SS      AARCH64_FL_V8_9A
#define AARCH64_FL_PMUv3p9       AARCH64_FL_V8_9A
#define AARCH64_FL_RASv2         AARCH64_FL_V8_9A
#define AARCH64_FL_S1PIE         AARCH64_FL_V8_9A
#define AARCH64_FL_S1POE         AARCH64_FL_V8_9A
#define AARCH64_FL_S2PIE         AARCH64_FL_V8_9A
#define AARCH64_FL_S2POE         AARCH64_FL_V8_9A
#define AARCH64_FL_SCTLR2        AARCH64_FL_V8_9A
#define AARCH64_FL_SEBEP         AARCH64_FL_V8_9A
#define AARCH64_FL_SPE_FDS       AARCH64_FL_V8_9A
#define AARCH64_FL_TCR2          AARCH64_FL_V8_9A

#define TARGET_V8R AARCH64_HAVE_ISA (V8R)
#define TARGET_V9A AARCH64_HAVE_ISA (V9A)

/* SHA2 is an optional extension to AdvSIMD.  */
#define TARGET_SHA2 AARCH64_HAVE_ISA (SHA2)

/* SHA3 is an optional extension to AdvSIMD.  */
#define TARGET_SHA3 AARCH64_HAVE_ISA (SHA3)

/* AES is an optional extension to AdvSIMD.  */
#define TARGET_AES AARCH64_HAVE_ISA (AES)

/* SM4 is an optional extension to AdvSIMD.  */
#define TARGET_SM4 AARCH64_HAVE_ISA (SM4)

/* CRC instructions that can be enabled through the +crc arch extension.  */
#define TARGET_CRC32 AARCH64_HAVE_ISA (CRC)

/* Atomic instructions that can be enabled through the +lse extension.  */
#define TARGET_LSE AARCH64_HAVE_ISA (LSE)

/* Armv8.2-A FP16 support that can be enabled through the +fp16 extension.  */
#define TARGET_FP_F16INST AARCH64_HAVE_ISA (F16)
#define TARGET_SIMD_F16INST (TARGET_SIMD && TARGET_FP_F16INST)

/* FP16FML is an optional extension to AdvSIMD.  */
#define TARGET_F16FML (TARGET_SIMD_F16INST && AARCH64_HAVE_ISA (F16FML))

/* Dot Product is an optional extension to AdvSIMD enabled through +dotprod.  */
#define TARGET_DOTPROD AARCH64_HAVE_ISA (DOTPROD)

/* SVE instructions, enabled through +sve.  */
#define TARGET_SVE AARCH64_HAVE_ISA (SVE)

/* SVE2 instructions, enabled through +sve2.  */
#define TARGET_SVE2 AARCH64_HAVE_ISA (SVE2)

/* SVE2 AES instructions, enabled through +sve2-aes.  */
#define TARGET_SVE2_AES (AARCH64_HAVE_ISA (SVE2_AES) && TARGET_NON_STREAMING)

/* SVE2 BITPERM instructions, enabled through +sve2-bitperm.  */
#define TARGET_SVE2_BITPERM (AARCH64_HAVE_ISA (SVE2_BITPERM) \
                             && TARGET_NON_STREAMING)

/* SVE2 SHA3 instructions, enabled through +sve2-sha3.  */
#define TARGET_SVE2_SHA3 (AARCH64_HAVE_ISA (SVE2_SHA3) && TARGET_NON_STREAMING)

/* SVE2 SM4 instructions, enabled through +sve2-sm4.  */
#define TARGET_SVE2_SM4 (AARCH64_HAVE_ISA (SVE2_SM4) && TARGET_NON_STREAMING)

/* SVE2p1 instructions, enabled through +sve2p1.  */
#define TARGET_SVE2p1 AARCH64_HAVE_ISA (SVE2p1)

/* SME instructions, enabled through +sme.  Note that this does not
   imply anything about the state of PSTATE.SM; instructions that require
   SME and streaming mode should use TARGET_STREAMING instead.  */
#define TARGET_SME AARCH64_HAVE_ISA (SME)

/* The FEAT_SME_I16I64 extension to SME, enabled through +sme-i16i64.  */
#define TARGET_SME_I16I64 AARCH64_HAVE_ISA (SME_I16I64)

/* The FEAT_SME_B16B16 extension to SME, enabled through +sme-b16b16.  */
#define TARGET_STREAMING_SME_B16B16 \
  (AARCH64_HAVE_ISA (SME_B16B16) && TARGET_STREAMING)

/* The FEAT_SME_F16F16 extension to SME, enabled through +sme-f16f16.  */
#define TARGET_STREAMING_SME_F16F16 \
  (AARCH64_HAVE_ISA (SME_F16F16) && TARGET_STREAMING)

/* The FEAT_SME_F64F64 extension to SME, enabled through +sme-f64f64.  */
#define TARGET_SME_F64F64 AARCH64_HAVE_ISA (SME_F64F64)

/* SME2 instructions, enabled through +sme2.  */
#define TARGET_SME2 AARCH64_HAVE_ISA (SME2)

/* Same, with streaming mode enabled.  */
#define TARGET_STREAMING_SME2 (TARGET_STREAMING && TARGET_SME2)

#define TARGET_STREAMING_SME2p1 (TARGET_STREAMING && AARCH64_HAVE_ISA (SME2p1))

#define TARGET_SME_B16B16 AARCH64_HAVE_ISA (SME_B16B16)

/* ARMv8.3-A features.  */
#define TARGET_ARMV8_3 AARCH64_HAVE_ISA (V8_3A)

/* Javascript conversion instruction from Armv8.3-a.  */
#define TARGET_JSCVT AARCH64_HAVE_ISA (JSCVT)

/* Armv8.3-a complex number extension to AdvSIMD.  */
#define TARGET_COMPLEX AARCH64_HAVE_ISA (FCMA)

/* Floating-point rounding instructions from Armv8.5-a.  */
#define TARGET_FRINT AARCH64_HAVE_ISA (FRINTTS)

/* TME instructions are enabled.  */
#define TARGET_TME AARCH64_HAVE_ISA (TME)

/* Random number instructions from Armv8.5-a.  */
#define TARGET_RNG AARCH64_HAVE_ISA (RNG)

/* Memory Tagging instructions, optional to Armv8.5, enabled through +memtag.  */
#define TARGET_MEMTAG AARCH64_HAVE_ISA (MEMTAG)

/* I8MM instructions are enabled through +i8mm.  */
#define TARGET_I8MM AARCH64_HAVE_ISA (I8MM)
#define TARGET_SVE_I8MM (TARGET_SVE && TARGET_I8MM)

/* F32MM instructions are enabled through +f32mm.  */
#define TARGET_SVE_F32MM AARCH64_HAVE_ISA (F32MM)

/* F64MM instructions are enabled through +f64mm.  */
#define TARGET_SVE_F64MM AARCH64_HAVE_ISA (F64MM)

/* BF16 instructions are enabled through +bf16.  */
#define TARGET_BF16_FP AARCH64_HAVE_ISA (BF16)
#define TARGET_BF16_SIMD (TARGET_BF16_FP && TARGET_SIMD)
#define TARGET_SVE_BF16 (TARGET_BF16_FP && TARGET_SVE)

/* PAUTH instructions are enabled through +pauth.  */
#define TARGET_PAUTH AARCH64_HAVE_ISA (PAUTH)

/* BTI instructions exist from Armv8.5-a onwards.  Their automatic use is
   enabled through -mbranch-protection by using NOP-space instructions,
   but this TARGET_ macro is used for defining BTI-related ACLE things.  */
#define TARGET_BTI AARCH64_HAVE_ISA (V8_5A)

/* MOPS instructions are enabled through +mops.  */
#define TARGET_MOPS AARCH64_HAVE_ISA (MOPS)

/* LS64 instructions are enabled through +ls64.  */
#define TARGET_LS64 AARCH64_HAVE_ISA (LS64)

/* CSSC instructions are enabled through +cssc.  */
#define TARGET_CSSC AARCH64_HAVE_ISA (CSSC)

/* Make sure this is always defined so that we can test it with a normal
   'if' rather than '#ifdef'.  */
#ifndef TARGET_FIX_ERR_A53_835769_DEFAULT
#define TARGET_FIX_ERR_A53_835769_DEFAULT 0
#else
#undef TARGET_FIX_ERR_A53_835769_DEFAULT
#define TARGET_FIX_ERR_A53_835769_DEFAULT 1
#endif

/* The SB instruction is enabled through +sb.  */
#define TARGET_SB AARCH64_HAVE_ISA (SB)

/* RCPC loads from Armv8.3-a.  */
#define TARGET_RCPC AARCH64_HAVE_ISA (RCPC)

/* The RCPC2 extension from Armv8.4-a, which allows immediate offsets with
   LDAPR and adds sign-extending versions.  */
#define TARGET_RCPC2 AARCH64_HAVE_ISA (RCPC2)

/* RCPC3 (Release Consistency) extensions, optional from Armv8.2-a.  */
#define TARGET_RCPC3 AARCH64_HAVE_ISA (RCPC3)

/* Apply the workaround for Cortex-A53 erratum 835769.  */
#define TARGET_FIX_ERR_A53_835769 \
  ((aarch64_fix_a53_err835769 == 2) \
   ? TARGET_FIX_ERR_A53_835769_DEFAULT : aarch64_fix_a53_err835769)

/* Make sure this is always defined so that we can test it with a normal
   'if' rather than '#ifdef'.  */
#ifndef TARGET_FIX_ERR_A53_843419_DEFAULT
#define TARGET_FIX_ERR_A53_843419_DEFAULT 0
#else
#undef TARGET_FIX_ERR_A53_843419_DEFAULT
#define TARGET_FIX_ERR_A53_843419_DEFAULT 1
#endif

/* Apply the workaround for Cortex-A53 erratum 843419.  */
#define TARGET_FIX_ERR_A53_843419 \
  ((aarch64_fix_a53_err843419 == 2) \
   ? TARGET_FIX_ERR_A53_843419_DEFAULT : aarch64_fix_a53_err843419)

/* ARMv8.1-A Adv.SIMD support.  */
#define TARGET_SIMD_RDMA (TARGET_SIMD && AARCH64_HAVE_ISA (RDMA))

/* Armv9.4-A features.  */
#define TARGET_ARMV9_4 AARCH64_HAVE_ISA (V9_4A)

/* 128-bit System Registers and Instructions from Armv9.4-a are enabled
   through +d128.  */
#define TARGET_D128 AARCH64_HAVE_ISA (D128)

/* Armv8.9-A/9.4-A Translation Hardening Extension system registers are
   enabled through +the.  */
#define TARGET_THE AARCH64_HAVE_ISA (THE)

/* Armv9.4-A Guarded Control Stack extension system registers are
   enabled through +gcs.  */
#define TARGET_GCS AARCH64_HAVE_ISA (GCS)

/* Floating Point Absolute Maximum/Minimum extension instructions are
   enabled through +faminmax.  */
#define TARGET_FAMINMAX AARCH64_HAVE_ISA (FAMINMAX)

/* Lookup table (LUTI) extension instructions are enabled through +lut.  */
#define TARGET_LUT AARCH64_HAVE_ISA (LUT)

/* Prefer different predicate registers for the output of a predicated
   operation over re-using an existing input predicate.  */
#define TARGET_SVE_PRED_CLOBBER (TARGET_SVE \
                                 && (aarch64_tune_params.extra_tuning_flags \
                                     & AARCH64_EXTRA_TUNE_AVOID_PRED_RMW))

/* fp8 instructions are enabled through +fp8.  */
#define TARGET_FP8 AARCH64_HAVE_ISA (FP8)

/* See the comment above the tuning flag for details.  */
#define TARGET_CHEAP_FPMR_WRITE \
  (bool (aarch64_tune_params.extra_tuning_flags \
         & AARCH64_EXTRA_TUNE_CHEAP_FPMR_WRITE))

/* Combinatorial tests.  */

#define TARGET_SVE2_OR_SME2 \
  ((TARGET_SVE2 || TARGET_STREAMING) \
   && (TARGET_SME2 || TARGET_NON_STREAMING))
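
/* The pattern ((A || TARGET_STREAMING) && (B || TARGET_NON_STREAMING))
   used here and below reads as: non-streaming functions need A, streaming
   functions need B, and streaming-compatible functions (for which neither
   TARGET_STREAMING nor TARGET_NON_STREAMING holds) need both.  */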

/* There's no need to check TARGET_SME for streaming or streaming-compatible
   functions, since streaming mode itself implies SME.  */
#define TARGET_SVE2p1_OR_SME (TARGET_SVE2p1 || TARGET_STREAMING)

#define TARGET_SVE2p1_OR_SME2 \
  ((TARGET_SVE2p1 || TARGET_STREAMING) \
   && (TARGET_SME2 || TARGET_NON_STREAMING))

#define TARGET_SSVE_B16B16 \
  (AARCH64_HAVE_ISA (SVE_B16B16) && TARGET_SVE2_OR_SME2)

/* Some fp8 instructions require +fp8 and one of +sve2 or +sme2.  */
#define TARGET_SSVE_FP8 (TARGET_FP8 \
                         && (TARGET_SVE2 || TARGET_STREAMING) \
                         && (TARGET_SME2 || TARGET_NON_STREAMING))

/* fp8 multiply-accumulate instructions are enabled through +fp8fma.  */
#define TARGET_FP8FMA AARCH64_HAVE_ISA (FP8FMA)

/* SVE2 versions of fp8 multiply-accumulate instructions are enabled for
   non-streaming mode by +fp8fma and for streaming mode by +ssve-fp8fma.  */
#define TARGET_SSVE_FP8FMA \
  (((TARGET_SVE2 && TARGET_FP8FMA) || TARGET_STREAMING) \
   && (AARCH64_HAVE_ISA (SSVE_FP8FMA) || TARGET_NON_STREAMING))

/* fp8 four-way dot product instructions are enabled through +fp8dot4.  */
#define TARGET_FP8DOT4 AARCH64_HAVE_ISA (FP8DOT4)

/* Streaming versions of fp8 four-way dot product instructions are enabled
   through +ssve-fp8dot4.  */
#define TARGET_SSVE_FP8DOT4 \
  (((TARGET_SVE2 && TARGET_FP8DOT4) || TARGET_STREAMING) \
   && (AARCH64_HAVE_ISA (SSVE_FP8DOT4) || TARGET_NON_STREAMING))

/* fp8 two-way dot product instructions are enabled through +fp8dot2.  */
#define TARGET_FP8DOT2 AARCH64_HAVE_ISA (FP8DOT2)

/* Streaming versions of fp8 two-way dot product instructions are enabled
   through +ssve-fp8dot2.  */
#define TARGET_SSVE_FP8DOT2 \
  (((TARGET_SVE2 && TARGET_FP8DOT2) || TARGET_STREAMING) \
   && (AARCH64_HAVE_ISA (SSVE_FP8DOT2) || TARGET_NON_STREAMING))

/* Standard register usage.  */

/* 31 64-bit general purpose registers R0-R30:
   R30		LR (link register)
   R29		FP (frame pointer)
   R19-R28	Callee-saved registers
   R18		The platform register; use as a temporary register
   R17		IP1 The second intra-procedure-call temporary register
		(can be used by call veneers and PLT code); otherwise use
		as a temporary register
   R16		IP0 The first intra-procedure-call temporary register (can
		be used by call veneers and PLT code); otherwise use as a
		temporary register
   R9-R15	Temporary registers
   R8		Structure value parameter / temporary register
   R0-R7	Parameter/result registers

   SP		stack pointer, encoded as X/R31 where permitted
   ZR		zero register, encoded as X/R31 elsewhere

   32 x 128-bit floating-point/vector registers
   V16-V31	Caller-saved (temporary) registers
   V8-V15	Callee-saved registers
   V0-V7	Parameter/result registers

   The vector register V0 holds scalar B0, H0, S0 and D0 in its least
   significant bits.  Unlike AArch32, S1 is not packed into D0, etc.

   P0-P7	Predicate low registers: valid in all predicate contexts
   P8-P15	Predicate high registers: used as scratch space

   FFR		First Fault Register, a fixed-use SVE predicate register
   FFRT		FFR token: a fake register used for modelling dependencies

   VG		Pseudo "vector granules" register

   VG is the number of 64-bit elements in an SVE vector.  We define
   it as a hard register so that we can easily map it to the DWARF VG
   register.  GCC internally uses the poly_int variable aarch64_sve_vg
   instead.  */

#define FIXED_X18 0
#define CALL_USED_X18 1

#define FIXED_REGISTERS						\
  {								\
    0, 0, 0, 0, 0, 0, 0, 0,		/* R0 - R7 */		\
    0, 0, 0, 0, 0, 0, 0, 0,		/* R8 - R15 */		\
    0, 0, FIXED_X18, 0, 0, 0, 0, 0,	/* R16 - R23 */		\
    0, 0, 0, 0, 0, 1, 0, 1,		/* R24 - R30, SP */	\
    0, 0, 0, 0, 0, 0, 0, 0,		/* V0 - V7 */		\
    0, 0, 0, 0, 0, 0, 0, 0,		/* V8 - V15 */		\
    0, 0, 0, 0, 0, 0, 0, 0,		/* V16 - V23 */		\
    0, 0, 0, 0, 0, 0, 0, 0,		/* V24 - V31 */		\
    1, 1, 1, 1,				/* SFP, AP, CC, VG */	\
    0, 0, 0, 0, 0, 0, 0, 0,		/* P0 - P7 */		\
    0, 0, 0, 0, 0, 0, 0, 0,		/* P8 - P15 */		\
    1,					/* FPMR */		\
    1, 1,				/* FFR and FFRT */	\
    1, 1, 1, 1, 1, 1, 1, 1		/* Fake registers */	\
  }

/* X30 is marked as caller-saved which is in line with regular function call
   behavior since the call instructions clobber it; AARCH64_EXPAND_CALL does
   that for regular function calls and avoids it for sibcalls.  X30 is
   considered live for sibcalls; EPILOGUE_USES helps achieve that by returning
   true, but not until function epilogues have been generated.  This ensures
   that X30 is available for use in leaf functions if needed.  */

#define CALL_REALLY_USED_REGISTERS				\
  {								\
    1, 1, 1, 1, 1, 1, 1, 1,		/* R0 - R7 */		\
    1, 1, 1, 1, 1, 1, 1, 1,		/* R8 - R15 */		\
    1, 1, CALL_USED_X18, 0, 0, 0, 0, 0,	/* R16 - R23 */		\
    0, 0, 0, 0, 0, 1, 1, 1,		/* R24 - R30, SP */	\
    1, 1, 1, 1, 1, 1, 1, 1,		/* V0 - V7 */		\
    0, 0, 0, 0, 0, 0, 0, 0,		/* V8 - V15 */		\
    1, 1, 1, 1, 1, 1, 1, 1,		/* V16 - V23 */		\
    1, 1, 1, 1, 1, 1, 1, 1,		/* V24 - V31 */		\
    1, 1, 1, 0,				/* SFP, AP, CC, VG */	\
    1, 1, 1, 1, 1, 1, 1, 1,		/* P0 - P7 */		\
    1, 1, 1, 1, 1, 1, 1, 1,		/* P8 - P15 */		\
    1,					/* FPMR */		\
    1, 1,				/* FFR and FFRT */	\
    0, 0, 0, 0, 0, 0, 0, 0		/* Fake registers */	\
  }

#define REGISTER_NAMES						\
  {								\
    "x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7",	\
    "x8",  "x9",  "x10", "x11", "x12", "x13", "x14", "x15",	\
    "x16", "x17", "x18", "x19", "x20", "x21", "x22", "x23",	\
    "x24", "x25", "x26", "x27", "x28", "x29", "x30", "sp",	\
    "v0",  "v1",  "v2",  "v3",  "v4",  "v5",  "v6",  "v7",	\
    "v8",  "v9",  "v10", "v11", "v12", "v13", "v14", "v15",	\
    "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23",	\
    "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31",	\
    "sfp", "ap", "cc", "vg",					\
    "p0",  "p1",  "p2",  "p3",  "p4",  "p5",  "p6",  "p7",	\
    "p8",  "p9",  "p10", "p11", "p12", "p13", "p14", "p15",	\
    "fpmr",							\
    "ffr", "ffrt",						\
    "lowering", "tpidr2_block", "sme_state", "tpidr2_setup",	\
    "za_free", "za_saved", "za", "zt0"				\
  }

/* Generate the register aliases for core register N.  */
#define R_ALIASES(N) {"r" # N, R0_REGNUM + (N)}, \
                     {"w" # N, R0_REGNUM + (N)}

#define V_ALIASES(N) {"q" # N, V0_REGNUM + (N)}, \
                     {"d" # N, V0_REGNUM + (N)}, \
                     {"s" # N, V0_REGNUM + (N)}, \
                     {"h" # N, V0_REGNUM + (N)}, \
                     {"b" # N, V0_REGNUM + (N)}, \
                     {"z" # N, V0_REGNUM + (N)}

#define P_ALIASES(N) {"pn" # N, P0_REGNUM + (N)}

/* Provide aliases for all of the ISA-defined register name forms.
   These aliases are convenient for use in the clobber lists of inline
   asm statements, as shown in the example after the table.  */

#define ADDITIONAL_REGISTER_NAMES \
  { R_ALIASES(0),  R_ALIASES(1),  R_ALIASES(2),  R_ALIASES(3),  \
    R_ALIASES(4),  R_ALIASES(5),  R_ALIASES(6),  R_ALIASES(7),  \
    R_ALIASES(8),  R_ALIASES(9),  R_ALIASES(10), R_ALIASES(11), \
    R_ALIASES(12), R_ALIASES(13), R_ALIASES(14), R_ALIASES(15), \
    R_ALIASES(16), R_ALIASES(17), R_ALIASES(18), R_ALIASES(19), \
    R_ALIASES(20), R_ALIASES(21), R_ALIASES(22), R_ALIASES(23), \
    R_ALIASES(24), R_ALIASES(25), R_ALIASES(26), R_ALIASES(27), \
    R_ALIASES(28), R_ALIASES(29), R_ALIASES(30), {"wsp", R0_REGNUM + 31}, \
    V_ALIASES(0),  V_ALIASES(1),  V_ALIASES(2),  V_ALIASES(3),  \
    V_ALIASES(4),  V_ALIASES(5),  V_ALIASES(6),  V_ALIASES(7),  \
    V_ALIASES(8),  V_ALIASES(9),  V_ALIASES(10), V_ALIASES(11), \
    V_ALIASES(12), V_ALIASES(13), V_ALIASES(14), V_ALIASES(15), \
    V_ALIASES(16), V_ALIASES(17), V_ALIASES(18), V_ALIASES(19), \
    V_ALIASES(20), V_ALIASES(21), V_ALIASES(22), V_ALIASES(23), \
    V_ALIASES(24), V_ALIASES(25), V_ALIASES(26), V_ALIASES(27), \
    V_ALIASES(28), V_ALIASES(29), V_ALIASES(30), V_ALIASES(31), \
    P_ALIASES(0),  P_ALIASES(1),  P_ALIASES(2),  P_ALIASES(3),  \
    P_ALIASES(4),  P_ALIASES(5),  P_ALIASES(6),  P_ALIASES(7),  \
    P_ALIASES(8),  P_ALIASES(9),  P_ALIASES(10), P_ALIASES(11), \
    P_ALIASES(12), P_ALIASES(13), P_ALIASES(14), P_ALIASES(15)  \
  }
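
/* Illustrative use (not part of this header): thanks to the aliases above,
   inline asm can clobber a vector register under any of its ISA names,
   e.g.

     asm volatile ("movi v0.4s, #0" ::: "v0");   // "q0", "d0", ... also work

   all of which resolve to V0_REGNUM.  */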

#define EPILOGUE_USES(REGNO) (aarch64_epilogue_uses (REGNO))

/* EXIT_IGNORE_STACK should be nonzero if, when returning from a function,
   the stack pointer does not matter.  This is only true if the function
   uses alloca.  */
#define EXIT_IGNORE_STACK (cfun->calls_alloca)

#define STATIC_CHAIN_REGNUM		R18_REGNUM
#define HARD_FRAME_POINTER_REGNUM	R29_REGNUM
#define FRAME_POINTER_REGNUM		SFP_REGNUM
#define STACK_POINTER_REGNUM		SP_REGNUM
#define ARG_POINTER_REGNUM		AP_REGNUM
#define FIRST_PSEUDO_REGISTER		(LAST_FAKE_REGNUM + 1)

/* The number of argument registers available for each class.  */
#define NUM_ARG_REGS		8
#define NUM_FP_ARG_REGS		8
#define NUM_PR_ARG_REGS		4

/* A Homogeneous Floating-Point or Short-Vector Aggregate may have at most
   four members.  */
#define HA_MAX_NUM_FLDS		4

/* External dwarf register number scheme.  These numbers are used to
   identify registers in dwarf debug information; the values are
   defined by the AArch64 ABI.  The numbering scheme is independent of
   GCC's internal register numbering scheme.  */

#define AARCH64_DWARF_R0	0

/* The number of R registers; note 31, not 32.  */
#define AARCH64_DWARF_NUMBER_R	31

#define AARCH64_DWARF_SP	31
#define AARCH64_DWARF_VG	46
#define AARCH64_DWARF_P0	48
#define AARCH64_DWARF_V0	64

/* The number of V registers.  */
#define AARCH64_DWARF_NUMBER_V	32

/* For signal frames we need to use an alternative return column.  This
   value must not correspond to a hard register and must be out of the
   range of DWARF_FRAME_REGNUM().  */
#define DWARF_ALT_FRAME_RETURN_COLUMN \
  (AARCH64_DWARF_V0 + AARCH64_DWARF_NUMBER_V)

/* We add 1 extra frame register for use as the
   DWARF_ALT_FRAME_RETURN_COLUMN.  */
#define DWARF_FRAME_REGISTERS (DWARF_ALT_FRAME_RETURN_COLUMN + 1)

#define DEBUGGER_REGNO(REGNO) aarch64_debugger_regno (REGNO)

/* Provide a definition of DWARF_FRAME_REGNUM here so that fallback unwinders
   can use DWARF_ALT_FRAME_RETURN_COLUMN defined above.  This is just the same
   as the default definition in dwarf2out.cc.  */
#undef DWARF_FRAME_REGNUM
#define DWARF_FRAME_REGNUM(REGNO) DEBUGGER_REGNO (REGNO)

#define DWARF_FRAME_RETURN_COLUMN DWARF_FRAME_REGNUM (LR_REGNUM)

#define DWARF2_UNWIND_INFO 1

/* Use R0 through R3 to pass exception handling information.  */
#define EH_RETURN_DATA_REGNO(N) \
  ((N) < 4 ? ((unsigned int) R0_REGNUM + (N)) : INVALID_REGNUM)

/* Select a format to encode pointers in exception handling data.  */
#define ASM_PREFERRED_EH_DATA_FORMAT(CODE, GLOBAL) \
  aarch64_asm_preferred_eh_data_format ((CODE), (GLOBAL))

/* Output the assembly strings we want to add to a function definition.  */
#define ASM_DECLARE_FUNCTION_NAME(STR, NAME, DECL) \
  aarch64_declare_function_name (STR, NAME, DECL)

/* Output assembly strings for alias definition.  */
#define ASM_OUTPUT_DEF_FROM_DECLS(STR, DECL, TARGET) \
  aarch64_asm_output_alias (STR, DECL, TARGET)

/* Output assembly strings for undefined extern symbols.  */
#undef ASM_OUTPUT_EXTERNAL
#define ASM_OUTPUT_EXTERNAL(STR, DECL, NAME) \
  aarch64_asm_output_external (STR, DECL, NAME)

/* Output assembly strings after .cfi_startproc is emitted.  */
#define ASM_POST_CFI_STARTPROC aarch64_post_cfi_startproc

/* For EH returns X4 is a flag that is set in the EH return
   code paths and then X5 and X6 contain the stack adjustment
   and return address respectively.  */
#define EH_RETURN_TAKEN_RTX gen_rtx_REG (Pmode, R4_REGNUM)
#define EH_RETURN_STACKADJ_RTX gen_rtx_REG (Pmode, R5_REGNUM)
#define EH_RETURN_HANDLER_RTX gen_rtx_REG (Pmode, R6_REGNUM)

#undef TARGET_COMPUTE_FRAME_LAYOUT
#define TARGET_COMPUTE_FRAME_LAYOUT aarch64_layout_frame

/* Register in which the structure value is to be returned.  */
#define AARCH64_STRUCT_VALUE_REGNUM R8_REGNUM

/* Non-zero if REGNO is part of the Core register set.

   The rather unusual way of expressing this check is to avoid
   warnings when building the compiler when R0_REGNUM is 0 and REGNO
   is unsigned.  */
#define GP_REGNUM_P(REGNO) \
  (((unsigned) (REGNO - R0_REGNUM)) <= (R30_REGNUM - R0_REGNUM))
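
/* For example, GP_REGNUM_P (5) tests
   ((unsigned) (5 - R0_REGNUM)) <= (R30_REGNUM - R0_REGNUM), i.e. 5u <= 30u.
   The single unsigned comparison also rejects values below R0_REGNUM
   (they wrap around to large values), without triggering the
   "comparison is always true" warning that REGNO >= R0_REGNUM would
   produce when R0_REGNUM is 0.  */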

/* Registers known to be preserved over a BL instruction.  This consists of the
   GENERAL_REGS without x16, x17, and x30.  The x30 register is changed by the
   BL instruction itself, while the x16 and x17 registers may be used by
   veneers which can be inserted by the linker.  */
#define STUB_REGNUM_P(REGNO) \
  (GP_REGNUM_P (REGNO) \
   && (REGNO) != R16_REGNUM \
   && (REGNO) != R17_REGNUM \
   && (REGNO) != R30_REGNUM)

#define W8_W11_REGNUM_P(REGNO) \
  IN_RANGE (REGNO, R8_REGNUM, R11_REGNUM)

#define W12_W15_REGNUM_P(REGNO) \
  IN_RANGE (REGNO, R12_REGNUM, R15_REGNUM)

#define FP_REGNUM_P(REGNO) \
  (((unsigned) (REGNO - V0_REGNUM)) <= (V31_REGNUM - V0_REGNUM))

#define FP_LO_REGNUM_P(REGNO) \
  (((unsigned) (REGNO - V0_REGNUM)) <= (V15_REGNUM - V0_REGNUM))

#define FP_LO8_REGNUM_P(REGNO) \
  (((unsigned) (REGNO - V0_REGNUM)) <= (V7_REGNUM - V0_REGNUM))

#define PR_REGNUM_P(REGNO) \
  (((unsigned) (REGNO - P0_REGNUM)) <= (P15_REGNUM - P0_REGNUM))

#define PR_LO_REGNUM_P(REGNO) \
  (((unsigned) (REGNO - P0_REGNUM)) <= (P7_REGNUM - P0_REGNUM))

#define FP_SIMD_SAVED_REGNUM_P(REGNO) \
  (((unsigned) (REGNO - V8_REGNUM)) <= (V23_REGNUM - V8_REGNUM))

#define FAKE_REGNUM_P(REGNO) \
  IN_RANGE (REGNO, FIRST_FAKE_REGNUM, LAST_FAKE_REGNUM)

/* Register and constant classes.  */

enum reg_class
{
  NO_REGS,
  W8_W11_REGS,
  W12_W15_REGS,
  TAILCALL_ADDR_REGS,
  STUB_REGS,
  GENERAL_REGS,
  STACK_REG,
  POINTER_REGS,
  FP_LO8_REGS,
  FP_LO_REGS,
  FP_REGS,
  POINTER_AND_FP_REGS,
  PR_LO_REGS,
  PR_HI_REGS,
  PR_REGS,
  FFR_REGS,
  PR_AND_FFR_REGS,
  MOVEABLE_SYSREGS,
  FAKE_REGS,
  ALL_REGS,
  LIM_REG_CLASSES	/* Last */
};

#define N_REG_CLASSES ((int) LIM_REG_CLASSES)

#define REG_CLASS_NAMES			\
  {					\
    "NO_REGS",				\
    "W8_W11_REGS",			\
    "W12_W15_REGS",			\
    "TAILCALL_ADDR_REGS",		\
    "STUB_REGS",			\
    "GENERAL_REGS",			\
    "STACK_REG",			\
    "POINTER_REGS",			\
    "FP_LO8_REGS",			\
    "FP_LO_REGS",			\
    "FP_REGS",				\
    "POINTER_AND_FP_REGS",		\
    "PR_LO_REGS",			\
    "PR_HI_REGS",			\
    "PR_REGS",				\
    "FFR_REGS",				\
    "PR_AND_FFR_REGS",			\
    "MOVEABLE_SYSREGS",			\
    "FAKE_REGS",			\
    "ALL_REGS"				\
  }

#define REG_CLASS_CONTENTS						\
  {									\
    { 0x00000000, 0x00000000, 0x00000000 },	/* NO_REGS */		\
    { 0x00000f00, 0x00000000, 0x00000000 },	/* W8_W11_REGS */	\
    { 0x0000f000, 0x00000000, 0x00000000 },	/* W12_W15_REGS */	\
    { 0x00030000, 0x00000000, 0x00000000 },	/* TAILCALL_ADDR_REGS */\
    { 0x3ffcffff, 0x00000000, 0x00000000 },	/* STUB_REGS */		\
    { 0x7fffffff, 0x00000000, 0x00000003 },	/* GENERAL_REGS */	\
    { 0x80000000, 0x00000000, 0x00000000 },	/* STACK_REG */		\
    { 0xffffffff, 0x00000000, 0x00000003 },	/* POINTER_REGS */	\
    { 0x00000000, 0x000000ff, 0x00000000 },	/* FP_LO8_REGS */	\
    { 0x00000000, 0x0000ffff, 0x00000000 },	/* FP_LO_REGS */	\
    { 0x00000000, 0xffffffff, 0x00000000 },	/* FP_REGS */		\
    { 0xffffffff, 0xffffffff, 0x00000003 },	/* POINTER_AND_FP_REGS */\
    { 0x00000000, 0x00000000, 0x00000ff0 },	/* PR_LO_REGS */	\
    { 0x00000000, 0x00000000, 0x000ff000 },	/* PR_HI_REGS */	\
    { 0x00000000, 0x00000000, 0x000ffff0 },	/* PR_REGS */		\
    { 0x00000000, 0x00000000, 0x00600000 },	/* FFR_REGS */		\
    { 0x00000000, 0x00000000, 0x006ffff0 },	/* PR_AND_FFR_REGS */	\
    { 0x00000000, 0x00000000, 0x00100000 },	/* MOVEABLE_SYSREGS */	\
    { 0x00000000, 0x00000000, 0x7f800000 },	/* FAKE_REGS */		\
    { 0xffffffff, 0xffffffff, 0x001fffff }	/* ALL_REGS */		\
  }
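
/* Each row is a 3 x 32-bit bit mask over the hard register numbers.
   For example, W8_W11_REGS has first word 0x00000f00, i.e. bits 8-11,
   which are exactly registers x8 through x11.  */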

#define REGNO_REG_CLASS(REGNO)	aarch64_regno_regclass (REGNO)

#define INDEX_REG_CLASS	GENERAL_REGS
#define BASE_REG_CLASS	POINTER_REGS

/* Register pairs used to eliminate unneeded registers that point into
   the stack frame.  */
#define ELIMINABLE_REGS						\
  {								\
    { ARG_POINTER_REGNUM,	STACK_POINTER_REGNUM },		\
    { ARG_POINTER_REGNUM,	HARD_FRAME_POINTER_REGNUM },	\
    { FRAME_POINTER_REGNUM,	STACK_POINTER_REGNUM },		\
    { FRAME_POINTER_REGNUM,	HARD_FRAME_POINTER_REGNUM },	\
  }

#define INITIAL_ELIMINATION_OFFSET(FROM, TO, OFFSET) \
  (OFFSET) = aarch64_initial_elimination_offset (FROM, TO)

/* CPU/ARCH option handling.  */
#include "config/aarch64/aarch64-opts.h"

/* If there is no CPU defined at configure, use generic as default.  */
#ifndef TARGET_CPU_DEFAULT
# define TARGET_CPU_DEFAULT AARCH64_CPU_generic_armv8_a
#endif

/* If inserting a NOP before a mult-accumulate insn, remember to adjust the
   length so that conditional branching code is updated appropriately.  */
#define ADJUST_INSN_LENGTH(insn, length)	\
  do						\
    {						\
      if (aarch64_madd_needs_nop (insn))	\
	length += 4;				\
    } while (0)

#define FINAL_PRESCAN_INSN(INSN, OPVEC, NOPERANDS) \
  aarch64_final_prescan_insn (INSN);

/* The processor for which instructions should be scheduled.  */
extern enum aarch64_cpu aarch64_tune;

/* RTL generation support.  */
#define INIT_EXPANDERS aarch64_init_expanders ()

/* Stack layout; function entry, exit and calling.  */
#define STACK_GROWS_DOWNWARD 1

#define FRAME_GROWS_DOWNWARD 1

#define ACCUMULATE_OUTGOING_ARGS 1

#define FIRST_PARM_OFFSET(FNDECL) 0

/* Fix for VFP.  */
#define LIBCALL_VALUE(MODE) \
  gen_rtx_REG (MODE, FLOAT_MODE_P (MODE) ? V0_REGNUM : R0_REGNUM)

#define DEFAULT_PCC_STRUCT_RETURN 0

#if defined(HAVE_POLY_INT_H) && defined(GCC_VEC_H)
struct GTY (()) aarch64_frame
{
  /* The offset from the bottom of the static frame (the bottom of the
     outgoing arguments) of each register save slot, or -2 if no save is
     needed.  */
  poly_int64 reg_offset[LAST_SAVED_REGNUM + 1];

  /* The list of GPRs, FPRs and predicate registers that have nonnegative
     entries in reg_offset.  The registers are listed in order of
     increasing offset (rather than increasing register number).  */
  vec<unsigned, va_gc_atomic> *saved_gprs;
  vec<unsigned, va_gc_atomic> *saved_fprs;
  vec<unsigned, va_gc_atomic> *saved_prs;

  /* The offset from the base of the frame of a 64-bit slot whose low
     bit contains the incoming value of PSTATE.SM.  This slot must be
     within reach of the hard frame pointer.

     The offset is -1 if such a slot isn't needed.  */
  poly_int64 old_svcr_offset;

  /* The number of extra stack bytes taken up by register varargs.
     This area is allocated by the callee at the very top of the
     frame.  This value is rounded up to a multiple of
     STACK_BOUNDARY.  */
  HOST_WIDE_INT saved_varargs_size;

  /* The number of bytes between the bottom of the static frame (the bottom
     of the outgoing arguments) and the bottom of the register save area.
     This value is always a multiple of STACK_BOUNDARY.  */
  poly_int64 bytes_below_saved_regs;

  /* The number of bytes between the bottom of the static frame (the bottom
     of the outgoing arguments) and the hard frame pointer.  This value is
     always a multiple of STACK_BOUNDARY.  */
  poly_int64 bytes_below_hard_fp;

  /* The number of bytes between the top of the locals area and the top
     of the frame (the incoming SP).  This value is always a multiple of
     STACK_BOUNDARY.  */
  poly_int64 bytes_above_locals;

  /* The number of bytes between the hard_frame_pointer and the top of
     the frame (the incoming SP).  This value is always a multiple of
     STACK_BOUNDARY.  */
  poly_int64 bytes_above_hard_fp;

  /* The size of the frame, i.e. the number of bytes between the bottom
     of the outgoing arguments and the incoming SP.  This value is always
     a multiple of STACK_BOUNDARY.  */
  poly_int64 frame_size;

  /* The size of the initial stack adjustment before saving callee-saves.  */
  poly_int64 initial_adjust;

  /* The writeback value when pushing callee-save registers.
     It is zero when no push is used.  */
  HOST_WIDE_INT callee_adjust;

  /* The size of the stack adjustment before saving or after restoring
     SVE registers.  */
  poly_int64 sve_callee_adjust;

  /* The size of the stack adjustment after saving callee-saves.  */
  poly_int64 final_adjust;

  /* Store FP,LR and set up a frame pointer.  */
  bool emit_frame_chain;

  /* In each frame, we can associate up to two register saves with the
     initial stack allocation.  This happens in one of two ways:

     (1) Using an STR or STP with writeback to perform the initial
	 stack allocation.  When EMIT_FRAME_CHAIN, the registers will
	 be those needed to create a frame chain.

	 Indicated by CALLEE_ADJUST != 0.

     (2) Using a separate STP to set up the frame record, after the
	 initial stack allocation but before setting up the frame pointer.
	 This is used if the offset is too large to use writeback.

	 Indicated by CALLEE_ADJUST == 0 && EMIT_FRAME_CHAIN.

     These fields indicate which registers we've decided to handle using
     (1) or (2), or INVALID_REGNUM if none.

     In some cases we don't always need to pop all registers in the push
     candidates; the pop candidates record which registers need to be popped
     eventually.  The initial value of a pop candidate is copied from its
     corresponding push candidate.

     Currently, different pop candidates are only used for shadow call
     stack.  When "-fsanitize=shadow-call-stack" is specified, we replace
     x30 in the pop candidate with INVALID_REGNUM to ensure that x30 is
     not popped twice.  */
  unsigned wb_push_candidate1;
  unsigned wb_push_candidate2;
  unsigned wb_pop_candidate1;
  unsigned wb_pop_candidate2;

  /* Big-endian SVE frames need a spare predicate register in order
     to save vector registers in the correct layout for unwinding.
     This is the register they should use.  */
  unsigned spare_pred_reg;

  /* An SVE register that is saved below the hard frame pointer and that acts
     as a probe for later allocations, or INVALID_REGNUM if none.  */
  unsigned sve_save_and_probe;

  /* A register that is saved at the hard frame pointer and that acts
     as a probe for later allocations, or INVALID_REGNUM if none.  */
  unsigned hard_fp_save_and_probe;

  bool laid_out;

  /* True if shadow call stack should be enabled for the current function.  */
  bool is_scs_enabled;
};

/* Private to winnt.cc.  */
struct seh_frame_state;

#ifdef hash_set_h
typedef struct GTY (()) machine_function
{
  struct aarch64_frame frame;
  /* One entry for each hard register.  */
  bool reg_is_wrapped_separately[LAST_SAVED_REGNUM + 1];
  /* One entry for each general purpose register.  */
  rtx call_via[SP_REGNUM];

  /* A pseudo register that points to the function's TPIDR2 block, or null
     if the function doesn't have a TPIDR2 block.  */
  rtx tpidr2_block;

  /* A pseudo register that points to the function's ZA save buffer,
     or null if none.  */
  rtx za_save_buffer;

  /* A stack slot that stores the contents of the function's ZT0 state.  */
  rtx zt0_save_buffer;

  bool label_is_assembled;

  /* True if we've expanded at least one call to a function that changes
     PSTATE.SM.  This should only be used for saving compile time: false
     guarantees that no such mode switch exists.  */
  bool call_switches_pstate_sm;

  /* Used to generate unique identifiers for each update to ZA by an
     asm statement.  */
  unsigned int next_asm_update_za_id;

  /* A set of all decls that have been passed to a vld1 intrinsic in the
     current function.  This is used to help guide the vector cost model.  */
  hash_set<tree> *vector_load_decls;

  /* An instruction that was emitted at the start of the function to
     set an Advanced SIMD pseudo register to zero.  If the instruction
     still exists and still fulfils its original purpose, the same register
     can be reused by other code.  */
  rtx_insn *advsimd_zero_insn;

  /* During SEH output, this is non-null.  */
  struct seh_frame_state * GTY ((skip (""))) seh;
} machine_function;
#endif
#endif

/* Which ABI to use.  */
enum aarch64_abi_type
{
  AARCH64_ABI_LP64 = 0,
  AARCH64_ABI_ILP32 = 1
};

#ifndef AARCH64_ABI_DEFAULT
#define AARCH64_ABI_DEFAULT AARCH64_ABI_LP64
#endif

#define TARGET_ILP32 (aarch64_abi & AARCH64_ABI_ILP32)

enum arm_pcs
{
  ARM_PCS_AAPCS64,	/* Base standard AAPCS for 64 bit.  */
  ARM_PCS_SIMD,		/* For aarch64_vector_pcs functions.  */
  ARM_PCS_SVE,		/* For functions that pass or return
			   values in SVE registers.  */
  ARM_PCS_TLSDESC,	/* For targets of tlsdesc calls.  */
  ARM_PCS_UNKNOWN
};

/* We can't use machine_mode inside a generator file because it
   hasn't been created yet; we shouldn't be using any code that
   needs the real definition though, so this ought to be safe.  */
#ifdef GENERATOR_FILE
#define MACHMODE int
#else
#include "insn-modes.h"
#define MACHMODE machine_mode
#endif

#ifndef USED_FOR_TARGET
/* AAPCS related state tracking.  */
typedef struct
{
  enum arm_pcs pcs_variant;
  aarch64_isa_mode isa_mode;
  bool indirect_return;		/* Whether the function is marked with the
				   indirect_return attribute.  */
  int aapcs_arg_processed;	/* No need to lay out this argument again.  */
  int aapcs_ncrn;		/* Next Core register number.  */
  int aapcs_nextncrn;		/* Next next Core register number.  */
  int aapcs_nvrn;		/* Next Vector register number.  */
  int aapcs_nextnvrn;		/* Next next Vector register number.  */
  int aapcs_nprn;		/* Next Predicate register number.  */
  int aapcs_nextnprn;		/* Next next Predicate register number.  */
  rtx aapcs_reg;		/* Register assigned to this argument.  This
				   is NULL_RTX if this parameter goes on
				   the stack.  */
  MACHMODE aapcs_vfp_rmode;
  int aapcs_stack_words;	/* If the argument is passed on the stack,
				   this is the number of words needed, after
				   rounding up.  Only meaningful when
				   aapcs_reg == NULL_RTX.  */
  int aapcs_stack_size;		/* The total size (in 8-byte words) of the
				   stack arg area so far.  */
  bool silent_p;		/* True if we should act silently, rather than
				   raise an error for invalid calls.  */

  /* AARCH64_STATE_* flags that describe whether the function shares ZA
     and ZT0 with its callers.  */
  unsigned int shared_za_flags;
  unsigned int shared_zt0_flags;

  /* A list of registers that need to be saved and restored around a
     change to PSTATE.SM.  An auto_vec would be more convenient, but those
     can't be copied.  */
  unsigned int num_sme_mode_switch_args;
  rtx sme_mode_switch_args[12];
} CUMULATIVE_ARGS;
#endif

#define BLOCK_REG_PADDING(MODE, TYPE, FIRST) \
  (aarch64_pad_reg_upward (MODE, TYPE, FIRST) ? PAD_UPWARD : PAD_DOWNWARD)

#define PAD_VARARGS_DOWN 0

#define INIT_CUMULATIVE_ARGS(CUM, FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS) \
  aarch64_init_cumulative_args (&(CUM), FNTYPE, LIBNAME, FNDECL, N_NAMED_ARGS)

#define FUNCTION_ARG_REGNO_P(REGNO) \
  aarch64_function_arg_regno_p (REGNO)

/* ISA Features.  */

/* Addressing modes, etc.  */
#define HAVE_POST_INCREMENT	1
#define HAVE_PRE_INCREMENT	1
#define HAVE_POST_DECREMENT	1
#define HAVE_PRE_DECREMENT	1
#define HAVE_POST_MODIFY_DISP	1
#define HAVE_PRE_MODIFY_DISP	1

#define MAX_REGS_PER_ADDRESS	2

#define CONSTANT_ADDRESS_P(X)	aarch64_constant_address_p (X)

#define REGNO_OK_FOR_BASE_P(REGNO) \
  aarch64_regno_ok_for_base_p (REGNO, true)

#define REGNO_OK_FOR_INDEX_P(REGNO) \
  aarch64_regno_ok_for_index_p (REGNO, true)

#define LEGITIMATE_PIC_OPERAND_P(X) \
  aarch64_legitimate_pic_operand_p (X)

#define CASE_VECTOR_MODE Pmode

#define DEFAULT_SIGNED_CHAR 0

/* An integer expression for the size in bits of the largest integer machine
   mode that should actually be used.  We allow pairs of registers.  */
#define MAX_FIXED_MODE_SIZE GET_MODE_BITSIZE (TImode)

/* Maximum bytes moved by a single instruction (load/store pair).  */
#define MOVE_MAX (UNITS_PER_WORD * 2)

/* The base cost overhead of a memcpy call, for MOVE_RATIO and friends.  */
#define AARCH64_CALL_RATIO 8

/* MOVE_RATIO dictates when we will use the move_by_pieces infrastructure.
   move_by_pieces will continually copy the largest safe chunks.  So a
   7-byte copy is a 4-byte + 2-byte + byte copy.  This proves inefficient
   for both size and speed of copy, so we will instead use the "cpymem"
   standard name to implement the copy.  This logic does not apply when
   targeting -mstrict-align or TARGET_MOPS, so keep a sensible default in
   that case.  */
#define MOVE_RATIO(speed) \
  ((!STRICT_ALIGNMENT || TARGET_MOPS) \
   ? 2 : (((speed) ? 15 : AARCH64_CALL_RATIO) / 2))

/* Like MOVE_RATIO, without -mstrict-align, make decisions in "setmem" when
   we would use more than 3 scalar instructions.
   Otherwise follow a sensible default: when optimizing for size, give a better
   estimate of the length of a memset call, but use the default otherwise.  */
#define CLEAR_RATIO(speed) \
  (!STRICT_ALIGNMENT ? (TARGET_MOPS ? 0 : 4) \
   : (speed) ? 15 : AARCH64_CALL_RATIO)

/* SET_RATIO is similar to CLEAR_RATIO, but for a non-zero constant.  Without
   -mstrict-align, make decisions in "setmem".  Otherwise follow a sensible
   default: when optimizing for size, adjust the ratio to account for the
   overhead of loading the constant.  */
#define SET_RATIO(speed) \
  ((!STRICT_ALIGNMENT || TARGET_MOPS) \
   ? 0 : (speed) ? 15 : AARCH64_CALL_RATIO - 2)
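
/* Worked example (illustrative): in the default !STRICT_ALIGNMENT,
   !TARGET_MOPS configuration, MOVE_RATIO (speed) is 2, so block copies
   beyond a couple of registers go through the "cpymem" expander; with
   -mstrict-align and speed optimization it is 15 / 2 == 7, roughly the
   number of move instructions tolerated before calling memcpy.  */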

/* Disable auto-increment in move_by_pieces et al.  Use of auto-increment is
   rarely a good idea in straight-line code since it adds an extra address
   dependency between each instruction.  Better to use incrementing offsets.  */
#define USE_LOAD_POST_INCREMENT(MODE)	0
#define USE_LOAD_POST_DECREMENT(MODE)	0
#define USE_LOAD_PRE_INCREMENT(MODE)	0
#define USE_LOAD_PRE_DECREMENT(MODE)	0
#define USE_STORE_POST_INCREMENT(MODE)	0
#define USE_STORE_POST_DECREMENT(MODE)	0
#define USE_STORE_PRE_INCREMENT(MODE)	0
#define USE_STORE_PRE_DECREMENT(MODE)	0

/* WORD_REGISTER_OPERATIONS does not hold for AArch64.
   The assigned word_mode is DImode but operations narrower than SImode
   behave as 32-bit operations if using the W-form of the registers rather
   than as word_mode (64-bit) operations as WORD_REGISTER_OPERATIONS
   expects.  */
#define WORD_REGISTER_OPERATIONS 0

/* Define if loading from memory in MODE, an integral mode narrower than
   BITS_PER_WORD, will either zero-extend or sign-extend.  The value of this
   macro should be the code that says which one of the two operations is
   implicitly done, or UNKNOWN if none.  */
#define LOAD_EXTEND_OP(MODE) ZERO_EXTEND

/* Define this macro to be non-zero if instructions will fail to work
   if given data not on the nominal alignment.  */
#define STRICT_ALIGNMENT TARGET_STRICT_ALIGN

/* Enable wide bitfield accesses for more efficient bitfield code.  */
#define SLOW_BYTE_ACCESS 1

#define NO_FUNCTION_CSE 1

/* Specify the machine mode that the hardware addresses have.
   After generation of rtl, the compiler makes no further distinction
   between pointers and any other objects of this machine mode.  */
#define Pmode DImode

/* A C expression whose value is zero if pointers that need to be extended
   from being `POINTER_SIZE' bits wide to `Pmode' are sign-extended,
   greater than zero if they are zero-extended, and less than zero if the
   ptr_extend instruction should be used.  */
#define POINTERS_EXTEND_UNSIGNED 1

/* Mode of a function address in a call instruction (for indexing purposes).  */
#define FUNCTION_MODE Pmode

#define SELECT_CC_MODE(OP, X, Y) aarch64_select_cc_mode (OP, X, Y)

/* Having an integer comparison mode guarantees that we can use
   reverse_condition, but the usual restrictions apply to floating-point
   comparisons.  */
#define REVERSIBLE_CC_MODE(MODE) ((MODE) != CCFPmode && (MODE) != CCFPEmode)

#define CLZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
  ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
#define CTZ_DEFINED_VALUE_AT_ZERO(MODE, VALUE) \
  ((VALUE) = GET_MODE_UNIT_BITSIZE (MODE), 2)
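
/* For example, CLZ of a zero SImode operand is defined to be 32 (the
   mode's unit bitsize).  The trailing 2 records that the value is defined
   both at the RTL level and for the corresponding built-ins, so optimizers
   may rely on it (see the GCC internals documentation).  */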

/* Have space for both SP and GCSPR in the NONLOCAL case in
   emit_stack_save as well as in __builtin_setjmp, __builtin_longjmp
   and __builtin_nonlocal_goto.
   Note: On ILP32 the documented buf size is not enough; see PR84150.  */
#define STACK_SAVEAREA_MODE(LEVEL) \
  ((LEVEL) == SAVE_NONLOCAL ? E_CDImode : Pmode)

#define INCOMING_RETURN_ADDR_RTX gen_rtx_REG (Pmode, LR_REGNUM)

#define RETURN_ADDR_RTX aarch64_return_addr

/* BTI c + 3 insns
   + sls barrier of DSB + ISB
   + 2 pointer-sized entries.  */
#define TRAMPOLINE_SIZE (24 + (TARGET_ILP32 ? 8 : 16))
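
/* That is, 40 bytes for LP64 (24 bytes of code and barriers plus two
   8-byte entries) and 32 bytes for ILP32 (plus two 4-byte entries).  */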

/* Trampolines contain dwords, so must be dword aligned.  */
#define TRAMPOLINE_ALIGNMENT 64

/* Put trampolines in the text section so that mapping symbols work
   correctly.  */
#define TRAMPOLINE_SECTION text_section

/* To start with.  */
#define BRANCH_COST(SPEED_P, PREDICTABLE_P) \
  (aarch64_branch_cost (SPEED_P, PREDICTABLE_P))

/* Assembly output.  */

/* For now we'll make all jump tables pc-relative.  */
#define CASE_VECTOR_PC_RELATIVE 1

#define CASE_VECTOR_SHORTEN_MODE(min, max, body) \
  ((min < -0x1fff0 || max > 0x1fff0) ? SImode \
   : (min < -0x1f0 || max > 0x1f0) ? HImode \
   : QImode)
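
/* E.g. a jump table whose offsets all fit in [-0x1f0, 0x1f0] uses QImode
   (byte) entries; wider ranges fall back to HImode and then SImode.  */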

/* Jump table alignment is explicit in ASM_OUTPUT_CASE_LABEL.  */
#define ADDR_VEC_ALIGN(JUMPTABLE) 0

#define MCOUNT_NAME "_mcount"

#define NO_PROFILE_COUNTERS 1

/* Emit rtl for profiling.  Output assembler code to FILE
   to call "_mcount" for profiling a function entry.  */
#define PROFILE_HOOK(LABEL)					\
  {								\
    rtx fun, lr;						\
    lr = aarch64_return_addr_rtx ();				\
    fun = gen_rtx_SYMBOL_REF (Pmode, MCOUNT_NAME);		\
    emit_library_call (fun, LCT_NORMAL, VOIDmode, lr, Pmode);	\
  }

/* All the work is done in PROFILE_HOOK, but this is still required.  */
#define FUNCTION_PROFILER(STREAM, LABELNO) do { } while (0)

/* For some reason, the Linux headers think they know how to define
   these macros.  They don't!!!  */
#undef ASM_APP_ON
#undef ASM_APP_OFF
#define ASM_APP_ON "\t" ASM_COMMENT_START " Start of user assembly\n"
#define ASM_APP_OFF "\t" ASM_COMMENT_START " End of user assembly\n"

#define CONSTANT_POOL_BEFORE_FUNCTION 0

/* This definition should be relocated to aarch64-elf-raw.h.  This macro
   should be undefined in aarch64-linux.h and a clear_cache pattern
   implemented to emit either the call to __aarch64_sync_cache_range()
   directly or preferably the appropriate syscall or cache clear
   instructions inline.  */
#define CLEAR_INSN_CACHE(beg, end)				\
  extern void __aarch64_sync_cache_range (void *, void *);	\
  __aarch64_sync_cache_range (beg, end)

#define SHIFT_COUNT_TRUNCATED (!TARGET_SIMD)

/* Choose appropriate mode for caller saves, so we do the minimum
   required size of load/store.  */
#define HARD_REGNO_CALLER_SAVE_MODE(REGNO, NREGS, MODE) \
  aarch64_hard_regno_caller_save_mode ((REGNO), (NREGS), (MODE))

#undef SWITCHABLE_TARGET
#define SWITCHABLE_TARGET 1

/* Check that the TLS Descriptors mechanism is selected.  */
#define TARGET_TLS_DESC (aarch64_tls_dialect == TLS_DESCRIPTORS)

extern enum aarch64_code_model aarch64_cmodel;

/* When using the tiny addressing model, conditional and unconditional branches
   can span the whole of the available address space (1MB).  */
#define HAS_LONG_COND_BRANCH \
  (aarch64_cmodel == AARCH64_CMODEL_TINY \
   || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)

#define HAS_LONG_UNCOND_BRANCH \
  (aarch64_cmodel == AARCH64_CMODEL_TINY \
   || aarch64_cmodel == AARCH64_CMODEL_TINY_PIC)

#define TARGET_HAS_FMV_TARGET_ATTRIBUTE 0

#define TARGET_SUPPORTS_WIDE_INT 1

/* Modes valid for AdvSIMD D registers, i.e. that fit in half a Q register.  */
#define AARCH64_VALID_SIMD_DREG_MODE(MODE) \
  ((MODE) == V2SImode || (MODE) == V4HImode || (MODE) == V8QImode \
   || (MODE) == V2SFmode || (MODE) == V4HFmode || (MODE) == DImode \
   || (MODE) == DFmode || (MODE) == V4BFmode)

/* Modes valid for AdvSIMD Q registers.  */
#define AARCH64_VALID_SIMD_QREG_MODE(MODE) \
  ((MODE) == V4SImode || (MODE) == V8HImode || (MODE) == V16QImode \
   || (MODE) == V4SFmode || (MODE) == V8HFmode || (MODE) == V2DImode \
   || (MODE) == V2DFmode || (MODE) == V8BFmode)

#define ENDIAN_LANE_N(NUNITS, N) \
  (BYTES_BIG_ENDIAN ? NUNITS - 1 - N : N)
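
/* For example, ENDIAN_LANE_N (4, 0) is 3 on big-endian and 0 on
   little-endian, mapping GCC's lane numbering onto the architectural
   one.  */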

/* Extra specs when building a native AArch64-hosted compiler.
   Option rewriting rules based on host system.  */
#if defined(__aarch64__)
extern const char *host_detect_local_cpu (int argc, const char **argv);
#define HAVE_LOCAL_CPU_DETECT
# define EXTRA_SPEC_FUNCTIONS \
  { "local_cpu_detect", host_detect_local_cpu }, \
  AARCH64_BASE_SPEC_FUNCTIONS

/* Rewrite -m{arch,cpu,tune}=native based on the host system information.
   When rewriting -march=native, convert it into an -mcpu option if no other
   -mcpu or -mtune was given.  */
# define MCPU_MTUNE_NATIVE_SPECS \
  " %{march=native:%<march=native %:local_cpu_detect(%{mcpu=*|mtune=*:arch;:cpu})}" \
  " %{mcpu=native:%<mcpu=native %:local_cpu_detect(cpu)}" \
  " %{mtune=native:%<mtune=native %:local_cpu_detect(tune)}"

/* This will be used in OPTION_DEFAULT_SPECS below.
   When GCC is configured with --with-tune, we don't want to materialize an
   implicit -mtune, as that would prevent the rewriting of -march=native into
   -mcpu=native as per the above rules.  */
#define CONFIG_TUNE_SPEC \
  { "tune", "%{!mcpu=*:%{!mtune=*:%{!march=native:-mtune=%(VALUE)}}}" },
#else
# define MCPU_MTUNE_NATIVE_SPECS ""
# define EXTRA_SPEC_FUNCTIONS AARCH64_BASE_SPEC_FUNCTIONS
# define CONFIG_TUNE_SPEC \
  {"tune", "%{!mcpu=*:%{!mtune=*:-mtune=%(VALUE)}}"},
#endif

/* Support for configure-time --with-arch, --with-cpu and --with-tune.
   --with-arch and --with-cpu are ignored if either -mcpu or -march is used.
   --with-tune is ignored if either -mtune or -mcpu is used (but is not
   affected by -march, except in the -march=native case as per the
   CONFIG_TUNE_SPEC above).  */
#define OPTION_DEFAULT_SPECS				\
  {"arch", "%{!march=*:%{!mcpu=*:-march=%(VALUE)}}" },	\
  {"cpu", "%{!march=*:%{!mcpu=*:-mcpu=%(VALUE)}}" },	\
  CONFIG_TUNE_SPEC

#define MARCH_REWRITE_SPEC \
  "%{march=*:-march=%:rewrite_march(%{march=*:%*});" \
  "mcpu=*:-march=%:rewrite_mcpu(%{mcpu=*:%*})}"

extern const char *aarch64_rewrite_march (int argc, const char **argv);
extern const char *aarch64_rewrite_mcpu (int argc, const char **argv);
extern const char *is_host_cpu_not_armv8_base (int argc, const char **argv);
#define AARCH64_BASE_SPEC_FUNCTIONS \
  { "rewrite_march", aarch64_rewrite_march }, \
  { "rewrite_mcpu", aarch64_rewrite_mcpu }, \
  { "is_local_not_armv8_base", is_host_cpu_not_armv8_base },

#define ASM_CPU_SPEC \
  MARCH_REWRITE_SPEC

#define EXTRA_SPECS \
  { "asm_cpu_spec", ASM_CPU_SPEC }

#define ASM_OUTPUT_POOL_EPILOGUE aarch64_asm_output_pool_epilogue

/* This type is the user-visible __mfp8, and a pointer to that type.  We
   need it in many places in the backend.  Defined in aarch64-builtins.cc.  */
extern GTY(()) tree aarch64_mfp8_type_node;
extern GTY(()) tree aarch64_mfp8_ptr_type_node;

/* This type is the user-visible __fp16, and a pointer to that type.  We
   need it in many places in the backend.  Defined in aarch64-builtins.cc.  */
extern GTY(()) tree aarch64_fp16_type_node;
extern GTY(()) tree aarch64_fp16_ptr_type_node;

/* Pointer to the user-visible __bf16 type.  __bf16 itself is the generic
   bfloat16_type_node.  Defined in aarch64-builtins.cc.  */
extern GTY(()) tree aarch64_bf16_ptr_type_node;

/* The generic unwind code in libgcc does not initialize the frame pointer.
   So in order to unwind a function using a frame pointer, the very first
   function that is unwound must save the frame pointer.  That way the frame
   pointer is restored and its value is now valid - otherwise _Unwind_GetGR
   crashes.  Libgcc can now be safely built with -fomit-frame-pointer.  */
#define LIBGCC2_UNWIND_ATTRIBUTE \
  __attribute__((optimize ("no-omit-frame-pointer")))

#ifndef USED_FOR_TARGET
extern poly_uint16 aarch64_sve_vg;

/* The number of bits and bytes in an SVE vector.  */
#define BITS_PER_SVE_VECTOR (poly_uint16 (aarch64_sve_vg * 64))
#define BYTES_PER_SVE_VECTOR (poly_uint16 (aarch64_sve_vg * 8))

/* The number of bits and bytes in an SVE predicate.  */
#define BITS_PER_SVE_PRED BYTES_PER_SVE_VECTOR
#define BYTES_PER_SVE_PRED aarch64_sve_vg

/* The SVE mode for a vector of bytes.  */
#define SVE_BYTE_MODE VNx16QImode

/* The maximum number of bytes in a fixed-size vector.  This is 256 bytes
   (for -msve-vector-bits=2048) multiplied by the maximum number of
   vectors in a structure mode (4).

   This limit must not be used for variable-size vectors, since
   VL-agnostic code must work with arbitrary vector lengths.  */
#define MAX_COMPILE_TIME_VEC_BYTES (256 * 4)
#endif

#define REGMODE_NATURAL_SIZE(MODE) aarch64_regmode_natural_size (MODE)

/* Allocate a minimum of STACK_CLASH_MIN_BYTES_OUTGOING_ARGS bytes for the
   outgoing arguments if stack clash protection is enabled.  This is essential
   as the extra arg space allows us to skip a check in alloca.  */
#undef STACK_DYNAMIC_OFFSET
#define STACK_DYNAMIC_OFFSET(FUNDECL)			\
  ((flag_stack_clash_protection				\
    && cfun->calls_alloca				\
    && known_lt (crtl->outgoing_args_size,		\
		 STACK_CLASH_MIN_BYTES_OUTGOING_ARGS))	\
   ? ROUND_UP (STACK_CLASH_MIN_BYTES_OUTGOING_ARGS,	\
	       STACK_BOUNDARY / BITS_PER_UNIT)		\
   : (crtl->outgoing_args_size + STACK_POINTER_OFFSET))

/* Filled in by aarch64_adjust_reg_alloc_order, which is called before
   the first relevant use.  */
#define REG_ALLOC_ORDER {}
#define ADJUST_REG_ALLOC_ORDER aarch64_adjust_reg_alloc_order ()

#define AARCH64_VALID_SHRN_OP(T,S) \
  ((T) == TRUNCATE \
   || ((T) == US_TRUNCATE && (S) == LSHIFTRT) \
   || ((T) == SS_TRUNCATE && (S) == ASHIFTRT))

#ifndef USED_FOR_TARGET

/* Enumerates the mode-switching "entities" for AArch64.  */
enum class aarch64_mode_entity : int
{
  /* An aarch64_tristate_mode that says whether we have created a local
     save buffer for the current function's ZA state.  The only transition
     is from NO to YES.  */
  HAVE_ZA_SAVE_BUFFER,

  /* An aarch64_local_sme_state that reflects the state of all data
     controlled by PSTATE.ZA.  */
  LOCAL_SME_STATE
};

/* Describes the state of all data controlled by PSTATE.ZA.  */
enum class aarch64_local_sme_state : int
{
  /* ZA is in the off or dormant state.  If it is dormant, the contents
     of ZA belong to a caller.  */
  INACTIVE_CALLER,

  /* ZA is in the off state: PSTATE.ZA is 0 and TPIDR2_EL0 is null.  */
  OFF,

  /* ZA is in the off or dormant state.  If it is dormant, the contents
     of ZA belong to the current function.  */
  INACTIVE_LOCAL,

  /* ZA is in the off state and the current function's ZA contents are
     stored in the lazy save buffer.  This is the state on entry to
     exception handlers.  */
  SAVED_LOCAL,

  /* ZA is in the active state: PSTATE.ZA is 1 and TPIDR2_EL0 is null.
     The contents of ZA are live.  */
  ACTIVE_LIVE,

  /* ZA is in the active state: PSTATE.ZA is 1 and TPIDR2_EL0 is null.
     The contents of ZA are dead.  */
  ACTIVE_DEAD,

  /* ZA could be in multiple states.  */
  ANY
};

enum class aarch64_tristate_mode : int { NO, YES, MAYBE };

#define OPTIMIZE_MODE_SWITCHING(ENTITY) \
  aarch64_optimize_mode_switching (aarch64_mode_entity (ENTITY))

#define NUM_MODES_FOR_MODE_SWITCHING \
  { int (aarch64_tristate_mode::MAYBE), \
    int (aarch64_local_sme_state::ANY) }

/* Zero terminated list of regnos for which hardreg PRE should be
   applied.  */
#define HARDREG_PRE_REGNOS { FPM_REGNUM, 0 }

#endif

#endif /* GCC_AARCH64_H */