/* Machine description for AArch64 architecture.
   Copyright (C) 2009-2025 Free Software Foundation, Inc.
   Contributed by ARM Ltd.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

/* Important note about Carry generation in AArch64.

   Unlike some architectures, the C flag generated by a subtract
   operation, or a simple compare operation is set to 1 if the result
   does not overflow in an unsigned sense.  That is, if there is no
   borrow needed from a higher word.  That means that overflow from
   addition will set C, but overflow from a subtraction will clear C.
   We use CC_Cmode to represent detection of overflow from addition as
   CCmode is used for 'normal' compare (subtraction) operations.  For
   ADC, the representation becomes more complex still, since we cannot
   use the normal idiom of comparing the result to one of the input
   operands; instead we use CC_ADCmode to represent this case.  */

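/* As an illustrative sketch (not itself part of the mode definitions),
   the convention above means:

     adds  w0, w1, w2   // C == 1 iff w1 + w2 wraps around (unsigned overflow)
     subs  w0, w1, w2   // C == 1 iff w1 >= w2 (no borrow needed)
     cmp   w1, w2       // same C semantics as subs

   so a "b.cs" branch taken after an addition indicates unsigned overflow,
   while after a compare it indicates w1 >= w2 unsigned.  */
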
CC_MODE (CC_NZC);   /* Only N, Z and C bits of condition flags are valid.
                       (Used with SVE predicate tests.)  */
CC_MODE (CC_NZV);   /* Only N, Z and V bits of condition flags are valid.  */
CC_MODE (CC_NZ);    /* Only N and Z bits of condition flags are valid.  */
CC_MODE (CC_Z);     /* Only Z bit of condition flags is valid.  */
CC_MODE (CC_C);     /* C represents unsigned overflow of a simple addition.  */
CC_MODE (CC_ADC);   /* Unsigned overflow from an ADC (add with carry).  */
CC_MODE (CC_V);     /* Only V bit of condition flags is valid.  */

/* Half-precision floating point for __fp16.  */
FLOAT_MODE (HF, 2, 0);
ADJUST_FLOAT_FORMAT (HF, &ieee_half_format);

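/* SVE predicate modes.  Each BI corresponds to one byte of an SVE vector,
   so VNx16BI covers a single full-length vector and VNx32BI/VNx64BI
   cover groups of two and four vectors.  */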
VECTOR_BOOL_MODE (VNx64BI, 64, BI, 8);
VECTOR_BOOL_MODE (VNx32BI, 32, BI, 4);
VECTOR_BOOL_MODE (VNx16BI, 16, BI, 2);
VECTOR_BOOL_MODE (VNx8BI, 8, BI, 2);
VECTOR_BOOL_MODE (VNx4BI, 4, BI, 2);
VECTOR_BOOL_MODE (VNx2BI, 2, BI, 2);

ADJUST_NUNITS (VNx64BI, aarch64_sve_vg * 32);
ADJUST_NUNITS (VNx32BI, aarch64_sve_vg * 16);
ADJUST_NUNITS (VNx16BI, aarch64_sve_vg * 8);
ADJUST_NUNITS (VNx8BI, aarch64_sve_vg * 4);
ADJUST_NUNITS (VNx4BI, aarch64_sve_vg * 2);
ADJUST_NUNITS (VNx2BI, aarch64_sve_vg);

ADJUST_ALIGNMENT (VNx64BI, 2);
ADJUST_ALIGNMENT (VNx32BI, 2);
ADJUST_ALIGNMENT (VNx16BI, 2);
ADJUST_ALIGNMENT (VNx8BI, 2);
ADJUST_ALIGNMENT (VNx4BI, 2);
ADJUST_ALIGNMENT (VNx2BI, 2);

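/* Brain floating point (bfloat16) for __bf16.  */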
FLOAT_MODE (BF, 2, 0);
ADJUST_FLOAT_FORMAT (BF, &arm_bfloat_half_format);

VECTOR_MODES (INT, 8);        /* V8QI V4HI V2SI.  */
VECTOR_MODES (INT, 16);       /* V16QI V8HI V4SI V2DI.  */
VECTOR_MODES (FLOAT, 8);      /* V2SF.  */
VECTOR_MODES (FLOAT, 16);     /* V4SF V2DF.  */
VECTOR_MODE (INT, DI, 1);     /* V1DI.  */
VECTOR_MODE (FLOAT, DF, 1);   /* V1DF.  */
VECTOR_MODE (FLOAT, HF, 2);   /* V2HF.  */

/* Integer vector modes used to represent intermediate widened values in some
   instructions.  Not intended to be moved to and from registers or memory.  */
VECTOR_MODE (INT, HI, 16);    /* V16HI.  */
VECTOR_MODE (INT, SI, 8);     /* V8SI.  */
VECTOR_MODE (INT, DI, 4);     /* V4DI.  */
VECTOR_MODE (INT, TI, 2);     /* V2TI.  */

/* Oct Int: 256-bit integer mode needed for 32-byte vector arguments.  */
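INT_MODE (OI, 32);
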
/* Opaque integer modes for 3 or 4 Neon q-registers / 6 or 8 Neon d-registers
   (2 d-regs = 1 q-reg = TImode).  */
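INT_MODE (CI, 48);
INT_MODE (XI, 64);
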
VECTOR_MODE_WITH_PREFIX (V, INT, DI, 8, 5);
ADJUST_ALIGNMENT (V8DI, 8);

/* V2x4QImode.  Used in load/store pair patterns.  */
VECTOR_MODE_WITH_PREFIX (V2x, INT, QI, 4, 5);
ADJUST_NUNITS (V2x4QI, 8);
ADJUST_ALIGNMENT (V2x4QI, 4);

/* Define Advanced SIMD modes for structures of 2, 3 and 4 d-registers.  */
#define ADV_SIMD_D_REG_STRUCT_MODES(NVECS, VB, VH, VS, VD) \
  VECTOR_MODES_WITH_PREFIX (V##NVECS##x, INT, 8, 3); \
  VECTOR_MODES_WITH_PREFIX (V##NVECS##x, FLOAT, 8, 3); \
  VECTOR_MODE_WITH_PREFIX (V##NVECS##x, FLOAT, DF, 1, 3); \
  VECTOR_MODE_WITH_PREFIX (V##NVECS##x, INT, DI, 1, 3); \
\
  ADJUST_NUNITS (VB##QI, NVECS * 8); \
  ADJUST_NUNITS (VH##HI, NVECS * 4); \
  ADJUST_NUNITS (VS##SI, NVECS * 2); \
  ADJUST_NUNITS (VD##DI, NVECS); \
  ADJUST_NUNITS (VH##BF, NVECS * 4); \
  ADJUST_NUNITS (VH##HF, NVECS * 4); \
  ADJUST_NUNITS (VS##SF, NVECS * 2); \
  ADJUST_NUNITS (VD##DF, NVECS); \
\
  ADJUST_ALIGNMENT (VB##QI, 8); \
  ADJUST_ALIGNMENT (VH##HI, 8); \
  ADJUST_ALIGNMENT (VS##SI, 8); \
  ADJUST_ALIGNMENT (VD##DI, 8); \
  ADJUST_ALIGNMENT (VH##BF, 8); \
  ADJUST_ALIGNMENT (VH##HF, 8); \
  ADJUST_ALIGNMENT (VS##SF, 8); \
  ADJUST_ALIGNMENT (VD##DF, 8);

ADV_SIMD_D_REG_STRUCT_MODES (2, V2x8, V2x4, V2x2, V2x1)
ADV_SIMD_D_REG_STRUCT_MODES (3, V3x8, V3x4, V3x2, V3x1)
ADV_SIMD_D_REG_STRUCT_MODES (4, V4x8, V4x4, V4x2, V4x1)

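/* As a worked example (illustrative, not extra definitions): the
   NVECS == 2 invocation above yields V2x8QI, V2x4HI, V2x2SI, V2x1DI,
   V2x4HF, V2x4BF, V2x2SF and V2x1DF.  Each is 2 * 8 == 16 bytes in
   size: the contents of an LD2/ST2 operating on d-registers.  */
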
/* Define Advanced SIMD modes for structures of 2, 3 and 4 q-registers.  */
#define ADV_SIMD_Q_REG_STRUCT_MODES(NVECS, VB, VH, VS, VD) \
  VECTOR_MODES_WITH_PREFIX (V##NVECS##x, INT, 16, 3); \
  VECTOR_MODES_WITH_PREFIX (V##NVECS##x, FLOAT, 16, 3); \
\
  ADJUST_NUNITS (VB##QI, NVECS * 16); \
  ADJUST_NUNITS (VH##HI, NVECS * 8); \
  ADJUST_NUNITS (VS##SI, NVECS * 4); \
  ADJUST_NUNITS (VD##DI, NVECS * 2); \
  ADJUST_NUNITS (VH##BF, NVECS * 8); \
  ADJUST_NUNITS (VH##HF, NVECS * 8); \
  ADJUST_NUNITS (VS##SF, NVECS * 4); \
  ADJUST_NUNITS (VD##DF, NVECS * 2); \
\
  ADJUST_ALIGNMENT (VB##QI, 16); \
  ADJUST_ALIGNMENT (VH##HI, 16); \
  ADJUST_ALIGNMENT (VS##SI, 16); \
  ADJUST_ALIGNMENT (VD##DI, 16); \
  ADJUST_ALIGNMENT (VH##BF, 16); \
  ADJUST_ALIGNMENT (VH##HF, 16); \
  ADJUST_ALIGNMENT (VS##SF, 16); \
  ADJUST_ALIGNMENT (VD##DF, 16);

ADV_SIMD_Q_REG_STRUCT_MODES (2, V2x16, V2x8, V2x4, V2x2)
ADV_SIMD_Q_REG_STRUCT_MODES (3, V3x16, V3x8, V3x4, V3x2)
ADV_SIMD_Q_REG_STRUCT_MODES (4, V4x16, V4x8, V4x4, V4x2)

/* Define SVE modes for NVECS vectors.  VB, VH, VS and VD are the prefixes
   for 8-bit, 16-bit, 32-bit and 64-bit elements respectively.  It isn't
   strictly necessary to set the alignment here, since the default would
   be clamped to BIGGEST_ALIGNMENT anyhow, but it seems clearer.  */
#define SVE_MODES(NVECS, VB, VH, VS, VD, VT) \
  VECTOR_MODES_WITH_PREFIX (VNx, INT, 16 * NVECS, NVECS == 1 ? 1 : 4); \
  VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 16 * NVECS, NVECS == 1 ? 1 : 4); \
\
  ADJUST_NUNITS (VB##QI, aarch64_sve_vg * NVECS * 8); \
  ADJUST_NUNITS (VH##HI, aarch64_sve_vg * NVECS * 4); \
  ADJUST_NUNITS (VS##SI, aarch64_sve_vg * NVECS * 2); \
  ADJUST_NUNITS (VD##DI, aarch64_sve_vg * NVECS); \
  ADJUST_NUNITS (VT##TI, exact_div (aarch64_sve_vg * NVECS, 2)); \
  ADJUST_NUNITS (VH##BF, aarch64_sve_vg * NVECS * 4); \
  ADJUST_NUNITS (VH##HF, aarch64_sve_vg * NVECS * 4); \
  ADJUST_NUNITS (VS##SF, aarch64_sve_vg * NVECS * 2); \
  ADJUST_NUNITS (VD##DF, aarch64_sve_vg * NVECS); \
\
  ADJUST_ALIGNMENT (VB##QI, 16); \
  ADJUST_ALIGNMENT (VH##HI, 16); \
  ADJUST_ALIGNMENT (VS##SI, 16); \
  ADJUST_ALIGNMENT (VD##DI, 16); \
  ADJUST_ALIGNMENT (VT##TI, 16); \
  ADJUST_ALIGNMENT (VH##BF, 16); \
  ADJUST_ALIGNMENT (VH##HF, 16); \
  ADJUST_ALIGNMENT (VS##SF, 16); \
  ADJUST_ALIGNMENT (VD##DF, 16);

/* Give SVE vectors names of the form VNxX, where X describes what is
   stored in each 128-bit unit.  The actual size of the mode depends
   on command-line flags.

   VNx1* aren't really native SVE modes, but they can be useful in some
   limited situations.  */
VECTOR_MODE_WITH_PREFIX (VNx, INT, TI, 1, 1);
SVE_MODES (1, VNx16, VNx8, VNx4, VNx2, VNx1)
SVE_MODES (2, VNx32, VNx16, VNx8, VNx4, VNx2)
SVE_MODES (3, VNx48, VNx24, VNx12, VNx6, VNx3)
SVE_MODES (4, VNx64, VNx32, VNx16, VNx8, VNx4)

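/* A worked example of the naming scheme (illustrative only): the "16" in
   VNx16QI counts elements per 128-bit unit, not the total.  With 256-bit
   vectors, aarch64_sve_vg == 4 (one granule per 64 bits), so VNx16QI has
   aarch64_sve_vg * 8 == 32 QI elements and VNx4SI has
   aarch64_sve_vg * 2 == 8 SI elements.  */
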
/* Partial SVE vectors:

      VNx2QI VNx4QI VNx8QI
      VNx2HI VNx4HI
      VNx1SI VNx2SI
      VNx1DI
      VNx2HF VNx4HF
      VNx2BF VNx4BF
      VNx2SF

   In memory they occupy contiguous locations, in the same way as fixed-length
   vectors.  E.g. VNx8QImode is half the size of VNx16QImode.

   Passing 2 as the final argument ensures that the modes come after all
   other single-vector modes in the GET_MODE_WIDER chain, so that we never
   pick them in preference to a full vector mode.  */
VECTOR_MODE_WITH_PREFIX (VNx, INT, SI, 1, 2);
VECTOR_MODE_WITH_PREFIX (VNx, INT, DI, 1, 2);
VECTOR_MODES_WITH_PREFIX (VNx, INT, 2, 2);
VECTOR_MODES_WITH_PREFIX (VNx, INT, 4, 2);
VECTOR_MODES_WITH_PREFIX (VNx, INT, 8, 2);
VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 4, 2);
VECTOR_MODES_WITH_PREFIX (VNx, FLOAT, 8, 2);

ADJUST_NUNITS (VNx1SI, exact_div (aarch64_sve_vg, 2));
ADJUST_NUNITS (VNx1DI, exact_div (aarch64_sve_vg, 2));

ADJUST_NUNITS (VNx2QI, aarch64_sve_vg);
ADJUST_NUNITS (VNx2HI, aarch64_sve_vg);
ADJUST_NUNITS (VNx2SI, aarch64_sve_vg);
ADJUST_NUNITS (VNx2HF, aarch64_sve_vg);
ADJUST_NUNITS (VNx2BF, aarch64_sve_vg);
ADJUST_NUNITS (VNx2SF, aarch64_sve_vg);

ADJUST_NUNITS (VNx4QI, aarch64_sve_vg * 2);
ADJUST_NUNITS (VNx4HI, aarch64_sve_vg * 2);
ADJUST_NUNITS (VNx4HF, aarch64_sve_vg * 2);
ADJUST_NUNITS (VNx4BF, aarch64_sve_vg * 2);

ADJUST_NUNITS (VNx8QI, aarch64_sve_vg * 4);

ADJUST_ALIGNMENT (VNx2QI, 1);
ADJUST_ALIGNMENT (VNx4QI, 1);
ADJUST_ALIGNMENT (VNx8QI, 1);

ADJUST_ALIGNMENT (VNx2HI, 2);
ADJUST_ALIGNMENT (VNx4HI, 2);
ADJUST_ALIGNMENT (VNx2HF, 2);
ADJUST_ALIGNMENT (VNx2BF, 2);
ADJUST_ALIGNMENT (VNx4HF, 2);
ADJUST_ALIGNMENT (VNx4BF, 2);

ADJUST_ALIGNMENT (VNx1SI, 4);
ADJUST_ALIGNMENT (VNx2SI, 4);
ADJUST_ALIGNMENT (VNx2SF, 4);

ADJUST_ALIGNMENT (VNx1DI, 8);

/* Quad float: 128-bit floating mode for long doubles.  */
FLOAT_MODE (TF, 16, ieee_quad_format);

/* A 4-tuple of SVE vectors with the maximum -msve-vector-bits= setting.
   Note that this is a limit only on the compile-time sizes of modes;
   it is not a limit on the runtime sizes, since VL-agnostic code
   must work with arbitrary vector lengths.  */
#define MAX_BITSIZE_MODE_ANY_MODE (2048 * 4)

/* Coefficient 1 is multiplied by the number of 128-bit chunks in an
   SVE vector (referred to as "VQ") minus one.  */
#define NUM_POLY_INT_COEFFS 2

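/* For example (illustrative only): with the convention above, a poly_int
   size {A, B} means A + B * (VQ - 1) bytes.  GET_MODE_SIZE (VNx16QImode)
   is {16, 16}: 16 bytes when VQ == 1 (128-bit vectors), 32 bytes when
   VQ == 2, and so on.  A fixed-size mode such as V16QImode is simply
   {16, 0}.  */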