1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2024 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
/* NOTE(review): truncated fragment — the matching "#ifdef DEBUG_AARCH64"
   that this #endif closes is outside the visible extract; confirm against
   the original file.  Global trace flag used by the debug helpers below.  */
35 int debug_dump
= false;
36 #endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array
/* NOTE(review): the 32 string initializers (original lines 41-78) are
   missing from this extract — recover them before relying on this table.  */
[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array
/* NOTE(review): the 16 string initializers are missing from this extract.  */
[16] = {
102 /* The enumeration strings associated with each value of a 6-bit RPRFM
/* NOTE(review): the rest of this comment (original line 103) and all 64
   initializers are missing from this extract.  */
104 const char *const aarch64_rprfmop_array
[64] = {
113 /* Vector length multiples for a predicate-as-counter operand. Used in things
114 like AARCH64_OPND_SME_VLxN_10. */
115 const char *const aarch64_sme_vlxn_array
/* NOTE(review): the two initializers (presumably "vlx2"/"vlx4" — confirm)
   are missing from this extract.  */
[2] = {
120 /* Values accepted by the brb alias. */
121 const char *const aarch64_brbop_array
/* NOTE(review): the initializers are missing from this extract.  */
[] = {
126 /* Helper functions to determine which operand to be used to encode/decode
127 the size:Q fields for AdvSIMD instructions. */
130 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
132 return (qualifier
>= AARCH64_OPND_QLF_V_8B
133 && qualifier
<= AARCH64_OPND_QLF_V_1Q
);
137 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
139 return (qualifier
>= AARCH64_OPND_QLF_S_B
140 && qualifier
<= AARCH64_OPND_QLF_S_Q
);
/* NOTE(review): truncated fragment of "enum data_pattern" — members before
   DP_VECTOR_ACROSS_LANES (DP_UNKNOWN, DP_VECTOR_3SAME, DP_VECTOR_LONG,
   DP_VECTOR_WIDE, per the table comments below) are missing from this
   extract, as is the enum's closing brace.  */
149 DP_VECTOR_ACROSS_LANES
,
/* For each data_pattern value, the index of the operand that carries the
   size:Q information.  NOTE(review): the opening brace and closing "};"
   are missing from this extract.  */
152 static const char significant_operand_index
[] =
154 0, /* DP_UNKNOWN, by default using operand 0. */
155 0, /* DP_VECTOR_3SAME */
156 1, /* DP_VECTOR_LONG */
157 2, /* DP_VECTOR_WIDE */
158 1, /* DP_VECTOR_ACROSS_LANES */
/* NOTE(review): truncated fragment.  Classifies a qualifier sequence into a
   data_pattern (3SAME / LONG / WIDE / ACROSS_LANES).  The function braces,
   the fall-through default return (presumably DP_UNKNOWN — confirm) and
   several comment continuation lines are missing from this extract.  */
161 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
163 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
164 corresponds to one of a sequence of operands. */
166 static enum data_pattern
167 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
169 if (vector_qualifier_p (qualifiers
[0]))
171 /* e.g. v.4s, v.4s, v.4s
172 or v.4h, v.4h, v.h[3]. */
173 if (qualifiers
[0] == qualifiers
[1]
174 && vector_qualifier_p (qualifiers
[2])
175 && (aarch64_get_qualifier_esize (qualifiers
[0])
176 == aarch64_get_qualifier_esize (qualifiers
[1]))
177 && (aarch64_get_qualifier_esize (qualifiers
[0])
178 == aarch64_get_qualifier_esize (qualifiers
[2])))
179 return DP_VECTOR_3SAME
;
180 /* e.g. v.8h, v.8b, v.8b.
181 or v.4s, v.4h, v.h[2].
183 if (vector_qualifier_p (qualifiers
[1])
184 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
185 && (aarch64_get_qualifier_esize (qualifiers
[0])
/* Destination element twice the size of the sources: a LONG pattern.  */
186 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
187 return DP_VECTOR_LONG
;
188 /* e.g. v.8h, v.8h, v.8b. */
189 if (qualifiers
[0] == qualifiers
[1]
190 && vector_qualifier_p (qualifiers
[2])
191 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
192 && (aarch64_get_qualifier_esize (qualifiers
[0])
193 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
194 && (aarch64_get_qualifier_esize (qualifiers
[0])
195 == aarch64_get_qualifier_esize (qualifiers
[1])))
196 return DP_VECTOR_WIDE
;
198 else if (fp_qualifier_p (qualifiers
[0]))
200 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
201 if (vector_qualifier_p (qualifiers
[1])
202 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
203 return DP_VECTOR_ACROSS_LANES
;
209 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
210 the AdvSIMD instructions. */
211 /* N.B. it is possible to do some optimization that doesn't call
212 get_data_pattern each time when we need to select an operand. We can
213 either buffer the caculated the result or statically generate the data,
214 however, it is not obvious that the optimization will bring significant
218 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
221 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
/* NOTE(review): truncated fragment.  Each entry is { lsb, width } for one
   instruction bit-field; the order must stay in sync with
   'enum aarch64_field_kind'.  The opening brace, at least one leading
   entry (original lines 227-228), some comment continuation lines near the
   off3/off2/ol entries, and the closing "};" are missing from this
   extract.  */
224 /* Instruction bit-fields.
225 + Keep synced with 'enum aarch64_field_kind'. */
226 const aarch64_field fields
[] =
229 { 8, 4 }, /* CRm: in the system instructions. */
230 { 10, 2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>. */
231 { 12, 4 }, /* CRn: in the system instructions. */
232 { 10, 8 }, /* CSSC_imm8. */
233 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
234 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
235 { 0, 5 }, /* LSE128_Rt: Shared input+output operand register. */
236 { 16, 5 }, /* LSE128_Rt2: Shared input+output operand register 2. */
237 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
238 { 22, 1 }, /* N: in logical (immediate) instructions. */
239 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
240 { 10, 5 }, /* Ra: in fp instructions. */
241 { 0, 5 }, /* Rd: in many integer instructions. */
242 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
243 { 5, 5 }, /* Rn: in many integer instructions. */
244 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
245 { 0, 5 }, /* Rt: in load/store instructions. */
246 { 10, 5 }, /* Rt2: in load/store pair instructions. */
247 { 12, 1 }, /* S: in load/store reg offset instructions. */
248 { 12, 2 }, /* SM3_imm2: Indexed element SM3 2 bits index immediate. */
249 { 1, 3 }, /* SME_Pdx2: predicate register, multiple of 2, [3:1]. */
250 { 13, 3 }, /* SME_Pm: second source scalable predicate register P0-P7. */
251 { 0, 3 }, /* SME_PNd3: PN0-PN7, bits [2:0]. */
252 { 5, 3 }, /* SME_PNn3: PN0-PN7, bits [7:5]. */
253 { 16, 1 }, /* SME_Q: Q class bit, bit 16. */
254 { 16, 2 }, /* SME_Rm: index base register W12-W15 [17:16]. */
255 { 13, 2 }, /* SME_Rv: vector select register W12-W15, bits [14:13]. */
256 { 15, 1 }, /* SME_V: (horizontal / vertical tiles), bit 15. */
257 { 10, 1 }, /* SME_VL_10: VLx2 or VLx4, bit [10]. */
258 { 13, 1 }, /* SME_VL_13: VLx2 or VLx4, bit [13]. */
259 { 0, 1 }, /* SME_ZAda_1b: tile ZA0-ZA1. */
260 { 0, 2 }, /* SME_ZAda_2b: tile ZA0-ZA3. */
261 { 0, 3 }, /* SME_ZAda_3b: tile ZA0-ZA7. */
262 { 4, 1 }, /* SME_ZdnT: upper bit of Zt, bit [4]. */
263 { 1, 4 }, /* SME_Zdn2: Z0-Z31, multiple of 2, bits [4:1]. */
264 { 0, 2 }, /* SME_Zdn2_0: lower 2 bits of Zt, bits [1:0]. */
265 { 2, 3 }, /* SME_Zdn4: Z0-Z31, multiple of 4, bits [4:2]. */
266 { 16, 4 }, /* SME_Zm: Z0-Z15, bits [19:16]. */
267 { 17, 4 }, /* SME_Zm2: Z0-Z31, multiple of 2, bits [20:17]. */
268 { 18, 3 }, /* SME_Zm4: Z0-Z31, multiple of 4, bits [20:18]. */
269 { 6, 4 }, /* SME_Zn2: Z0-Z31, multiple of 2, bits [9:6]. */
270 { 7, 3 }, /* SME_Zn4: Z0-Z31, multiple of 4, bits [9:7]. */
271 { 4, 1 }, /* SME_ZtT: upper bit of Zt, bit [4]. */
272 { 0, 3 }, /* SME_Zt3: lower 3 bits of Zt, bits [2:0]. */
273 { 0, 2 }, /* SME_Zt2: lower 2 bits of Zt, bits [1:0]. */
274 { 23, 1 }, /* SME_i1: immediate field, bit 23. */
275 { 12, 2 }, /* SME_size_12: bits [13:12]. */
276 { 22, 2 }, /* SME_size_22: size<1>, size<0> class field, [23:22]. */
277 { 23, 1 }, /* SME_sz_23: bit [23]. */
278 { 22, 1 }, /* SME_tszh: immediate and qualifier field, bit 22. */
279 { 18, 3 }, /* SME_tszl: immediate and qualifier field, bits [20:18]. */
280 { 0, 8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0]. */
281 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
282 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
283 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
284 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
285 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
286 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
287 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
288 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
289 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
290 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
291 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
292 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
293 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
294 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
295 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
296 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
297 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
298 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
299 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
300 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
301 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
302 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
303 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
304 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
305 { 5, 1 }, /* SVE_i1: single-bit immediate. */
306 { 23, 1 }, /* SVE_i1_23: single-bit immediate. */
307 { 22, 2 }, /* SVE_i2: 2-bit index, bits [23,22]. */
308 { 20, 1 }, /* SVE_i2h: high bit of 2bit immediate, bits. */
309 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
310 { 19, 2 }, /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19]. */
311 { 22, 2 }, /* SVE_i3h3: two high bits of 3bit immediate, bits [22,23]. */
312 { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
313 { 12, 1 }, /* SVE_i3l2: low bit of 3-bit immediate, bit 12. */
314 { 10, 2 }, /* SVE_i4l2: two low bits of 4bit immediate, bits [11,10]. */
315 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
316 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
317 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
318 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
319 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
320 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
321 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
322 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
323 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
324 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
325 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
326 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
327 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
328 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
329 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
330 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
331 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
332 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
333 { 30, 1 }, /* SVE_sz2: 1-bit element size select. */
334 { 16, 4 }, /* SVE_tsz: triangular size select. */
335 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
336 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
337 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
338 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
339 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
340 { 22, 1 }, /* S_imm10: in LDRAA and LDRAB instructions. */
341 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
342 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
343 { 19, 5 }, /* b40: in the test bit and branch instructions. */
344 { 31, 1 }, /* b5: in the test bit and branch instructions. */
345 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
346 { 12, 4 }, /* cond: condition flags as a source operand. */
347 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
348 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
349 { 21, 2 }, /* hw: in move wide constant instructions. */
350 { 0, 1 }, /* imm1_0: general immediate in bits [0]. */
351 { 2, 1 }, /* imm1_2: general immediate in bits [2]. */
352 { 3, 1 }, /* imm1_3: general immediate in bits [3]. */
353 { 8, 1 }, /* imm1_8: general immediate in bits [8]. */
354 { 10, 1 }, /* imm1_10: general immediate in bits [10]. */
355 { 14, 1 }, /* imm1_14: general immediate in bits [14]. */
356 { 15, 1 }, /* imm1_15: general immediate in bits [15]. */
357 { 16, 1 }, /* imm1_16: general immediate in bits [16]. */
358 { 0, 2 }, /* imm2_0: general immediate in bits [1:0]. */
359 { 1, 2 }, /* imm2_1: general immediate in bits [2:1]. */
360 { 2, 2 }, /* imm2_2: general immediate in bits [3:2]. */
361 { 8, 2 }, /* imm2_8: general immediate in bits [9:8]. */
362 { 10, 2 }, /* imm2_10: 2-bit immediate, bits [11:10] */
363 { 12, 2 }, /* imm2_12: 2-bit immediate, bits [13:12] */
364 { 13, 2 }, /* imm2_13: 2-bit immediate, bits [14:13] */
365 { 15, 2 }, /* imm2_15: 2-bit immediate, bits [16:15] */
366 { 16, 2 }, /* imm2_16: 2-bit immediate, bits [17:16] */
367 { 19, 2 }, /* imm2_19: 2-bit immediate, bits [20:19] */
368 { 0, 3 }, /* imm3_0: general immediate in bits [2:0]. */
369 { 5, 3 }, /* imm3_5: general immediate in bits [7:5]. */
370 { 10, 3 }, /* imm3_10: in add/sub extended reg instructions. */
371 { 12, 3 }, /* imm3_12: general immediate in bits [14:12]. */
372 { 14, 3 }, /* imm3_14: general immediate in bits [16:14]. */
373 { 15, 3 }, /* imm3_15: general immediate in bits [17:15]. */
374 { 19, 3 }, /* imm3_19: general immediate in bits [21:19]. */
375 { 0, 4 }, /* imm4_0: in rmif instructions. */
376 { 5, 4 }, /* imm4_5: in SME instructions. */
377 { 10, 4 }, /* imm4_10: in adddg/subg instructions. */
378 { 11, 4 }, /* imm4_11: in advsimd ext and advsimd ins instructions. */
379 { 14, 4 }, /* imm4_14: general immediate in bits [17:14]. */
380 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
381 { 10, 6 }, /* imm6_10: in add/sub reg shifted instructions. */
382 { 15, 6 }, /* imm6_15: in rmif instructions. */
383 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
384 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
385 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
386 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
387 { 5, 14 }, /* imm14: in test bit and branch instructions. */
388 { 0, 16 }, /* imm16_0: in udf instruction. */
389 { 5, 16 }, /* imm16_5: in exception instructions. */
390 { 17, 1 }, /* imm17_1: in 1 bit element index. */
391 { 17, 2 }, /* imm17_2: in 2 bits element index. */
392 { 5, 19 }, /* imm19: e.g. in CBZ. */
393 { 0, 26 }, /* imm26: in unconditional branch instructions. */
394 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
395 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
396 { 5, 19 }, /* immhi: e.g. in ADRP. */
397 { 29, 2 }, /* immlo: e.g. in ADRP. */
398 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
399 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
400 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
401 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
402 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
403 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
404 { 30, 1 }, /* lse_sz: in LSE extension atomic instructions. */
405 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
406 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
407 { 19, 2 }, /* op0: in the system instructions. */
408 { 16, 3 }, /* op1: in the system instructions. */
409 { 5, 3 }, /* op2: in the system instructions. */
410 { 22, 2 }, /* opc: in load/store reg offset instructions. */
411 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
412 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
413 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
414 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
415 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
416 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
417 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
418 { 31, 1 }, /* sf: in integer data processing instructions. */
419 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
420 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
421 { 22, 1 }, /* sz: 1-bit element size select. */
422 { 22, 2 }, /* type: floating point type field in fp data inst. */
423 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
/* NOTE(review): the comment continuations for the next three entries
   (original lines 425, 427, 430) are missing from this extract.  */
424 { 5, 3 }, /* off3: immediate offset used to calculate slice number in a
426 { 5, 2 }, /* off2: immediate offset used to calculate slice number in
428 { 7, 1 }, /* ZAn_1: name of the 1bit encoded ZA tile. */
429 { 5, 1 }, /* ol: immediate offset used to calculate slice number in a ZA
431 { 6, 2 }, /* ZAn_2: name of the 2bit encoded ZA tile. */
432 { 5, 3 }, /* ZAn_3: name of the 3bit encoded ZA tile. */
433 { 6, 1 }, /* ZAn: name of the bit encoded ZA tile. */
434 { 12, 4 }, /* opc2: in rcpc3 ld/st inst deciding the pre/post-index. */
435 { 30, 2 }, /* rcpc3_size: in rcpc3 ld/st, field controls Rt/Rt2 width. */
436 { 5, 1 }, /* FLD_brbop: used in BRB to mean IALL or INJ. */
437 { 8, 1 }, /* ZA8_1: name of the 1 bit encoded ZA tile ZA0-ZA1. */
438 { 7, 2 }, /* ZA7_2: name of the 2 bits encoded ZA tile ZA0-ZA3. */
439 { 6, 3 }, /* ZA6_3: name of the 3 bits encoded ZA tile ZA0-ZA7. */
440 { 5, 4 }, /* ZA5_4: name of the 4 bits encoded ZA tile ZA0-ZA15. */
443 enum aarch64_operand_class
444 aarch64_get_operand_class (enum aarch64_opnd type
)
446 return aarch64_operands
[type
].op_class
;
450 aarch64_get_operand_name (enum aarch64_opnd type
)
452 return aarch64_operands
[type
].name
;
455 /* Get operand description string.
456 This is usually for the diagnosis purpose. */
458 aarch64_get_operand_desc (enum aarch64_opnd type
)
460 return aarch64_operands
[type
].desc
;
/* NOTE(review): truncated fragment.  Each entry pairs the condition's
   mnemonic spellings (integer and SVE forms) with its 4-bit encoding.
   The opening brace, the entries for encodings 0x6-0x7 and 0xc-0xf, and
   the closing "};" are missing from this extract.  */
463 /* Table of all conditional affixes. */
464 const aarch64_cond aarch64_conds
[16] =
466 {{"eq", "none"}, 0x0},
467 {{"ne", "any"}, 0x1},
468 {{"cs", "hs", "nlast"}, 0x2},
469 {{"cc", "lo", "ul", "last"}, 0x3},
470 {{"mi", "first"}, 0x4},
471 {{"pl", "nfrst"}, 0x5},
474 {{"hi", "pmore"}, 0x8},
475 {{"ls", "plast"}, 0x9},
476 {{"ge", "tcont"}, 0xa},
477 {{"lt", "tstop"}, 0xb},
/* NOTE(review): truncated fragment.  Maps a 4-bit condition encoding to its
   aarch64_conds entry.  The return type line and an interior line (original
   line 487 — possibly a range assert) are missing from this extract.  */
485 get_cond_from_value (aarch64_insn value
)
488 return &aarch64_conds
[(unsigned int) value
];
492 get_inverted_cond (const aarch64_cond
*cond
)
494 return &aarch64_conds
[cond
->value
^ 0x1];
497 /* Table describing the operand extension/shifting operators; indexed by
498 enum aarch64_modifier_kind.
500 The value column provides the most common values for encoding modifiers,
501 which enables table-driven encoding/decoding for the modifiers. */
502 const struct aarch64_name_value_pair aarch64_operand_modifiers
/* NOTE(review): the initializer entries (original lines 503-522) are
   missing from this extract.  */
[] =
523 enum aarch64_modifier_kind
524 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
526 return desc
- aarch64_operand_modifiers
;
530 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
532 return aarch64_operand_modifiers
[kind
].value
;
/* NOTE(review): truncated fragment.  Maps an encoding VALUE back to a
   modifier kind; the second parameter (original line 537) and the
   condition lines selecting between the extend and shift branches
   (original lines 538-541) are missing from this extract.  */
535 enum aarch64_modifier_kind
536 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
540 return AARCH64_MOD_UXTB
+ value
;
542 return AARCH64_MOD_LSL
- value
;
546 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
548 return kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
;
552 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
554 return kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
;
/* Memory barrier (DMB/DSB/ISB) option names, indexed by their 4-bit CRm
   encoding.  NOTE(review): the 16 initializer entries (original lines
   558-576) are missing from this extract.  */
557 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
577 const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options
[4] =
578 { /* CRm<3:2> #imm */
579 { "oshnxs", 16 }, /* 00 16 */
580 { "nshnxs", 20 }, /* 01 20 */
581 { "ishnxs", 24 }, /* 10 24 */
582 { "synxs", 28 }, /* 11 28 */
585 /* Table describing the operands supported by the aliases of the HINT
588 The name column is the operand that is accepted for the alias. The value
589 column is the hint number of the alias. The list of operands is terminated
590 by NULL in the name column. */
592 const struct aarch64_name_value_pair aarch64_hint_options
[] =
594 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
595 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT
, 0x20) },
596 { "csync", HINT_OPD_CSYNC
}, /* PSB CSYNC. */
597 { "dsync", HINT_OPD_DSYNC
}, /* GCSB DSYNC. */
598 { "c", HINT_OPD_C
}, /* BTI C. */
599 { "j", HINT_OPD_J
}, /* BTI J. */
600 { "jc", HINT_OPD_JC
}, /* BTI JC. */
601 { NULL
, HINT_OPD_NULL
},
/* PRFM prefetch operation names.  B(op,l,t) packs operation kind, cache
   level and temporal hint into the 5-bit prfop encoding.
   NOTE(review): truncated fragment — the middle comment line (original
   line 605, the "l -> level" row), the trailing reserved entries and the
   closing "};" are missing from this extract.  */
604 /* op -> op: load = 0 instruction = 1 store = 2
606 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
607 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
608 const struct aarch64_name_value_pair aarch64_prfops
[32] =
610 { "pldl1keep", B(0, 1, 0) },
611 { "pldl1strm", B(0, 1, 1) },
612 { "pldl2keep", B(0, 2, 0) },
613 { "pldl2strm", B(0, 2, 1) },
614 { "pldl3keep", B(0, 3, 0) },
615 { "pldl3strm", B(0, 3, 1) },
616 { "pldslckeep", B(0, 4, 0) },
617 { "pldslcstrm", B(0, 4, 1) },
618 { "plil1keep", B(1, 1, 0) },
619 { "plil1strm", B(1, 1, 1) },
620 { "plil2keep", B(1, 2, 0) },
621 { "plil2strm", B(1, 2, 1) },
622 { "plil3keep", B(1, 3, 0) },
623 { "plil3strm", B(1, 3, 1) },
624 { "plislckeep", B(1, 4, 0) },
625 { "plislcstrm", B(1, 4, 1) },
626 { "pstl1keep", B(2, 1, 0) },
627 { "pstl1strm", B(2, 1, 1) },
628 { "pstl2keep", B(2, 2, 0) },
629 { "pstl2strm", B(2, 2, 1) },
630 { "pstl3keep", B(2, 3, 0) },
631 { "pstl3strm", B(2, 3, 1) },
632 { "pstslckeep", B(2, 4, 0) },
633 { "pstslcstrm", B(2, 4, 1) },
/* Utilities on value constraint.  */

/* Return 1 if LOW <= VALUE <= HIGH, 0 otherwise.  (Return type and braces
   reconstructed from a truncated fragment; the visible "? 1 : 0" fixes the
   int result.)  */
static int
value_in_range_p (int64_t value, int low, int high)
{
  return (value >= low && value <= high) ? 1 : 0;
}
/* Return true if VALUE is a multiple of ALIGN.  ALIGN must be non-zero.
   (Return type and braces reconstructed from a truncated fragment.)  */
static bool
value_aligned_p (int64_t value, int align)
{
  return (value % align) == 0;
}
/* A signed value fits in a field.  Return true if VALUE is representable
   as a WIDTH-bit two's-complement number, i.e. -2^(WIDTH-1) <= VALUE
   < 2^(WIDTH-1).  For WIDTH >= 64 the range test cannot be formed without
   overflow, so any value is rejected here.  (Braces and the true/false
   returns reconstructed from a truncated fragment — confirm against the
   original file.)  */
static bool
value_fit_signed_field_p (int64_t value, unsigned width)
{
  if (width < sizeof (value) * 8)
    {
      /* Use an unsigned shift to form 2^(WIDTH-1) without UB.  */
      int64_t lim = (uint64_t) 1 << (width - 1);
      if (value >= -lim && value < lim)
	return true;
    }
  return false;
}
/* An unsigned value fits in a field.  Return true if 0 <= VALUE < 2^WIDTH.
   For WIDTH >= 64 the bound cannot be formed without overflow, so any
   value is rejected here.  (Braces and the true/false returns
   reconstructed from a truncated fragment — confirm against the original
   file.)  */
static bool
value_fit_unsigned_field_p (int64_t value, unsigned width)
{
  if (width < sizeof (value) * 8)
    {
      /* Use an unsigned shift to form 2^WIDTH without UB.  */
      int64_t lim = (uint64_t) 1 << width;
      if (value >= 0 && value < lim)
	return true;
    }
  return false;
}
688 /* Return 1 if OPERAND is SP or WSP. */
690 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
692 return ((aarch64_get_operand_class (operand
->type
)
693 == AARCH64_OPND_CLASS_INT_REG
)
694 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
695 && operand
->reg
.regno
== 31);
698 /* Return 1 if OPERAND is XZR or WZP. */
700 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
702 return ((aarch64_get_operand_class (operand
->type
)
703 == AARCH64_OPND_CLASS_INT_REG
)
704 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
705 && operand
->reg
.regno
== 31);
/* NOTE(review): truncated fragment.  Checks W/X vs WSP/SP cross-
   qualification for register-31 operands.  The function braces, the
   "return true" bodies of each case, the default case and the final
   return (original lines 720-721, 724-725, 729-730, 734-741) are missing
   from this extract.  */
708 /* Return true if the operand *OPERAND that has the operand code
709 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
710 qualified by the qualifier TARGET. */
713 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
714 aarch64_opnd_qualifier_t target
)
716 switch (operand
->qualifier
)
718 case AARCH64_OPND_QLF_W
:
719 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
722 case AARCH64_OPND_QLF_X
:
723 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
726 case AARCH64_OPND_QLF_WSP
:
727 if (target
== AARCH64_OPND_QLF_W
728 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
731 case AARCH64_OPND_QLF_SP
:
732 if (target
== AARCH64_OPND_QLF_X
733 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
/* NOTE(review): truncated fragment.  The remaining parameters (KNOWN_IDX
   and IDX, original lines 750-756), the loop body that records a unique
   matching sequence in saved_i (original lines 780-788), and the function
   braces are missing from this extract.  */
743 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
744 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
746 Return NIL if more than one expected qualifiers are found. */
748 aarch64_opnd_qualifier_t
749 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
751 const aarch64_opnd_qualifier_t known_qlf
,
758 When the known qualifier is NIL, we have to assume that there is only
759 one qualifier sequence in the *QSEQ_LIST and return the corresponding
760 qualifier directly. One scenario is that for instruction
761 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
762 which has only one possible valid qualifier sequence
764 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
765 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
767 Because the qualifier NIL has dual roles in the qualifier sequence:
768 it can mean no qualifier for the operand, or the qualifer sequence is
769 not in use (when all qualifiers in the sequence are NILs), we have to
770 handle this special case here. */
771 if (known_qlf
== AARCH64_OPND_NIL
)
773 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
774 return qseq_list
[0][idx
];
777 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
779 if (qseq_list
[i
][known_idx
] == known_qlf
)
782 /* More than one sequences are found to have KNOWN_QLF at
784 return AARCH64_OPND_NIL
;
789 return qseq_list
[saved_i
][idx
];
/* NOTE(review): truncated fragment.  The enum's members (OQK_NIL,
   OQK_OPD_VARIANT, OQK_VALUE_IN_RANGE, ... — see their uses below) and
   the struct's three data fields plus desc string (original lines
   793-799 and 804-809) are missing from this extract.  */
792 enum operand_qualifier_kind
800 /* Operand qualifier description. */
801 struct operand_qualifier_data
803 /* The usage of the three data fields depends on the qualifier kind. */
810 enum operand_qualifier_kind kind
;
/* NOTE(review): truncated fragment.  For OQK_OPD_VARIANT entries the data
   fields are element size, element count and encoding value; for
   OQK_VALUE_IN_RANGE they are lower and upper bounds.  The opening brace,
   several interleaved entries/comments and the closing "};" are missing
   from this extract.  */
813 /* Indexed by the operand qualifier enumerators. */
814 struct operand_qualifier_data aarch64_opnd_qualifiers
[] =
816 {0, 0, 0, "NIL", OQK_NIL
},
818 /* Operand variant qualifiers.
820 element size, number of elements and common value for encoding. */
822 {4, 1, 0x0, "w", OQK_OPD_VARIANT
},
823 {8, 1, 0x1, "x", OQK_OPD_VARIANT
},
824 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT
},
825 {8, 1, 0x1, "sp", OQK_OPD_VARIANT
},
827 {1, 1, 0x0, "b", OQK_OPD_VARIANT
},
828 {2, 1, 0x1, "h", OQK_OPD_VARIANT
},
829 {4, 1, 0x2, "s", OQK_OPD_VARIANT
},
830 {8, 1, 0x3, "d", OQK_OPD_VARIANT
},
831 {16, 1, 0x4, "q", OQK_OPD_VARIANT
},
832 {2, 1, 0x0, "2b", OQK_OPD_VARIANT
},
833 {4, 1, 0x0, "4b", OQK_OPD_VARIANT
},
834 {4, 1, 0x0, "2h", OQK_OPD_VARIANT
},
836 {1, 4, 0x0, "4b", OQK_OPD_VARIANT
},
837 {1, 8, 0x0, "8b", OQK_OPD_VARIANT
},
838 {1, 16, 0x1, "16b", OQK_OPD_VARIANT
},
839 {2, 2, 0x0, "2h", OQK_OPD_VARIANT
},
840 {2, 4, 0x2, "4h", OQK_OPD_VARIANT
},
841 {2, 8, 0x3, "8h", OQK_OPD_VARIANT
},
842 {4, 2, 0x4, "2s", OQK_OPD_VARIANT
},
843 {4, 4, 0x5, "4s", OQK_OPD_VARIANT
},
844 {8, 1, 0x6, "1d", OQK_OPD_VARIANT
},
845 {8, 2, 0x7, "2d", OQK_OPD_VARIANT
},
846 {16, 1, 0x8, "1q", OQK_OPD_VARIANT
},
848 {0, 0, 0, "z", OQK_OPD_VARIANT
},
849 {0, 0, 0, "m", OQK_OPD_VARIANT
},
851 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
852 {16, 0, 0, "tag", OQK_OPD_VARIANT
},
854 /* Qualifiers constraining the value range.
856 Lower bound, higher bound, unused. */
858 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE
},
859 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE
},
860 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE
},
861 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE
},
862 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE
},
863 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE
},
864 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE
},
866 /* Qualifiers for miscellaneous purpose.
868 unused, unused and unused. */
873 {0, 0, 0, "retrieving", 0},
877 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
879 return aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
;
883 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
885 return aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
;
889 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
891 return aarch64_opnd_qualifiers
[qualifier
].desc
;
894 /* Given an operand qualifier, return the expected data element size
895 of a qualified operand. */
897 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
899 assert (operand_variant_qualifier_p (qualifier
));
900 return aarch64_opnd_qualifiers
[qualifier
].data0
;
904 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
906 assert (operand_variant_qualifier_p (qualifier
));
907 return aarch64_opnd_qualifiers
[qualifier
].data1
;
911 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
913 assert (operand_variant_qualifier_p (qualifier
));
914 return aarch64_opnd_qualifiers
[qualifier
].data2
;
918 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
920 assert (qualifier_value_in_range_constraint_p (qualifier
));
921 return aarch64_opnd_qualifiers
[qualifier
].data0
;
925 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
927 assert (qualifier_value_in_range_constraint_p (qualifier
));
928 return aarch64_opnd_qualifiers
[qualifier
].data1
;
/* Debug trace helper taking a printf-style format.
   NOTE(review): truncated fragment — the return type and the entire body
   (original lines 934-941) are missing from this extract.  */
933 aarch64_verbose (const char *str
, ...)
/* Print one qualifier sequence (debug only).
   NOTE(review): truncated fragment — the return type, braces and the
   declaration of loop index i are missing from this extract.  */
944 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
948 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
949 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
/* Debug helper: print the qualifiers currently on OPND's operands next to
   the candidate sequence QUALIFIER being matched against.
   NOTE(review): truncated fragment — the return type, braces and the
   declaration of loop index i are missing from this extract.  */
954 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
955 const aarch64_opnd_qualifier_t
*qualifier
)
958 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
960 aarch64_verbose ("dump_match_qualifiers:");
961 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
962 curr
[i
] = opnd
[i
].qualifier
;
963 dump_qualifier_sequence (curr
);
964 aarch64_verbose ("against");
965 dump_qualifier_sequence (qualifier
);
967 #endif /* DEBUG_AARCH64 */
/* NOTE(review): truncated fragment.  Scans the operand list for a later
   operand equal to operand 0 (a shared source/destination).  The return
   type, the declaration/initialisation of i, and the return statements
   inside and after the loop are missing from this extract.  */
969 /* This function checks if the given instruction INSN is a destructive
970 instruction based on the usage of the registers. It does not recognize
971 unary destructive instructions. */
973 aarch64_is_destructive_by_operands (const aarch64_opcode
*opcode
)
976 const enum aarch64_opnd
*opnds
= opcode
->operands
;
978 if (opnds
[0] == AARCH64_OPND_NIL
)
981 while (opnds
[++i
] != AARCH64_OPND_NIL
)
982 if (opnds
[i
] == opnds
[0])
/* NOTE(review): truncated fragment.  Counts operands up to the NIL
   terminator.  The return type, the declaration/initialisation of i, and
   the final return are missing from this extract.  */
988 /* TODO improve this, we can have an extra field at the runtime to
989 store the number of operands rather than calculating it every time. */
992 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
995 const enum aarch64_opnd
*opnds
= opcode
->operands
;
996 while (opnds
[i
++] != AARCH64_OPND_NIL
)
999 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
/* NOTE(review): truncated fragment.  Best-match search over the opcode's
   qualifier sequence list.  The return type, function braces, the
   early-out when there are no operands, several loop-body statements
   (invalid counting, saved index for the winning sequence) and the final
   return statements are missing from this extract — do not modify this
   logic without recovering the full original.  */
1003 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
1004 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
1006 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
1007 This is always 0 if the function succeeds.
1009 N.B. on the entry, it is very likely that only some operands in *INST
1010 have had their qualifiers been established.
1012 If STOP_AT is not -1, the function will only try to match
1013 the qualifier sequence for operands before and including the operand
1014 of index STOP_AT; and on success *RET will only be filled with the first
1015 (STOP_AT+1) qualifiers.
1017 A couple examples of the matching algorithm:
1019 X,W,NIL should match
1022 NIL,NIL should match
1025 Apart from serving the main encoding routine, this can also be called
1026 during or after the operand decoding. */
1029 aarch64_find_best_match (const aarch64_inst
*inst
,
1030 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
1031 int stop_at
, aarch64_opnd_qualifier_t
*ret
,
1034 int i
, num_opnds
, invalid
, min_invalid
;
1035 const aarch64_opnd_qualifier_t
*qualifiers
;
1037 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
1040 DEBUG_TRACE ("SUCCEED: no operand");
1045 if (stop_at
< 0 || stop_at
>= num_opnds
)
1046 stop_at
= num_opnds
- 1;
1048 /* For each pattern. */
1049 min_invalid
= num_opnds
;
1050 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
1053 qualifiers
= *qualifiers_list
;
1055 /* Start as positive. */
1058 DEBUG_TRACE ("%d", i
);
1059 #ifdef DEBUG_AARCH64
1061 dump_match_qualifiers (inst
->operands
, qualifiers
);
1064 /* The first entry should be taken literally, even if it's an empty
1065 qualifier sequence. (This matters for strict testing.) In other
1066 positions an empty sequence acts as a terminator. */
1067 if (i
> 0 && empty_qualifier_sequence_p (qualifiers
))
1070 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
1072 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
1073 && !(inst
->opcode
->flags
& F_STRICT
))
1075 /* Either the operand does not have qualifier, or the qualifier
1076 for the operand needs to be deduced from the qualifier
1078 In the latter case, any constraint checking related with
1079 the obtained qualifier should be done later in
1080 operand_general_constraint_met_p. */
1083 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
1085 /* Unless the target qualifier can also qualify the operand
1086 (which has already had a non-nil qualifier), non-equal
1087 qualifiers are generally un-matched. */
1088 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
1094 continue; /* Equal qualifiers are certainly matched. */
1097 if (min_invalid
> invalid
)
1098 min_invalid
= invalid
;
1100 /* Qualifiers established. */
1101 if (min_invalid
== 0)
1105 *invalid_count
= min_invalid
;
1106 if (min_invalid
== 0)
1108 /* Fill the result in *RET. */
1110 qualifiers
= *qualifiers_list
;
1112 DEBUG_TRACE ("complete qualifiers using list %d", i
);
1113 #ifdef DEBUG_AARCH64
1115 dump_qualifier_sequence (qualifiers
);
1118 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
1119 ret
[j
] = *qualifiers
;
1120 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
1121 ret
[j
] = AARCH64_OPND_QLF_NIL
;
1123 DEBUG_TRACE ("SUCCESS");
1127 DEBUG_TRACE ("FAIL");
1131 /* Operand qualifier matching and resolving.
1133 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1134 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1136 Store the smallest number of non-matching qualifiers in *INVALID_COUNT.
1137 This is always 0 if the function succeeds.
1139 if UPDATE_P, update the qualifier(s) in *INST after the matching
1143 match_operands_qualifier (aarch64_inst
*inst
, bool update_p
,
1147 aarch64_opnd_qualifier_seq_t qualifiers
;
1149 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
1150 qualifiers
, invalid_count
))
1152 DEBUG_TRACE ("matching FAIL");
1156 /* Update the qualifiers. */
1158 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1160 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
1162 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
1163 "update %s with %s for operand %d",
1164 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
1165 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
1166 inst
->operands
[i
].qualifier
= qualifiers
[i
];
1169 DEBUG_TRACE ("matching SUCCESS");
1173 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1176 IS32 indicates whether value is a 32-bit immediate or not.
1177 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1178 amount will be returned in *SHIFT_AMOUNT. */
1181 aarch64_wide_constant_p (uint64_t value
, int is32
, unsigned int *shift_amount
)
1185 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1189 /* Allow all zeros or all ones in top 32-bits, so that
1190 32-bit constant expressions like ~0x80000000 are
1192 if (value
>> 32 != 0 && value
>> 32 != 0xffffffff)
1193 /* Immediate out of range. */
1195 value
&= 0xffffffff;
1198 /* first, try movz then movn */
1200 if ((value
& ((uint64_t) 0xffff << 0)) == value
)
1202 else if ((value
& ((uint64_t) 0xffff << 16)) == value
)
1204 else if (!is32
&& (value
& ((uint64_t) 0xffff << 32)) == value
)
1206 else if (!is32
&& (value
& ((uint64_t) 0xffff << 48)) == value
)
1211 DEBUG_TRACE ("exit false with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1215 if (shift_amount
!= NULL
)
1216 *shift_amount
= amount
;
1218 DEBUG_TRACE ("exit true with amount %d", amount
);
1223 /* Build the accepted values for immediate logical SIMD instructions.
1225 The standard encodings of the immediate value are:
1226 N imms immr SIMD size R S
1227 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1228 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1229 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1230 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1231 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1232 0 11110s 00000r 2 UInt(r) UInt(s)
1233 where all-ones value of S is reserved.
1235 Let's call E the SIMD size.
1237 The immediate value is: S+1 bits '1' rotated to the right by R.
1239 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1240 (remember S != E - 1). */
1242 #define TOTAL_IMM_NB 5334
1247 aarch64_insn encoding
;
1248 } simd_imm_encoding
;
1250 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
1253 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
1255 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
1256 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
1258 if (imm1
->imm
< imm2
->imm
)
1260 if (imm1
->imm
> imm2
->imm
)
/* immediate bitfield standard encoding
   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
   1         ssssss     rrrrrr      64        rrrrrr ssssss
   0         0sssss     0rrrrr      32        rrrrr  sssss
   0         10ssss     00rrrr      16        rrrr   ssss
   0         110sss     000rrr      8         rrr    sss
   0         1110ss     0000rr      4         rr     ss
   0         11110s     00000r      2         r      s  */

/* Pack IS64 (the N bit), rotation R and size/length field S into the
   13-bit N:immr:imms encoding.  */
static inline uint32_t
encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
{
  return (is64 << 12) | (r << 6) | s;
}
1280 build_immediate_table (void)
1282 uint32_t log_e
, e
, s
, r
, s_mask
;
1288 for (log_e
= 1; log_e
<= 6; log_e
++)
1290 /* Get element size. */
1295 mask
= 0xffffffffffffffffull
;
1301 mask
= (1ull << e
) - 1;
1303 1 ((1 << 4) - 1) << 2 = 111100
1304 2 ((1 << 3) - 1) << 3 = 111000
1305 3 ((1 << 2) - 1) << 4 = 110000
1306 4 ((1 << 1) - 1) << 5 = 100000
1307 5 ((1 << 0) - 1) << 6 = 000000 */
1308 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1310 for (s
= 0; s
< e
- 1; s
++)
1311 for (r
= 0; r
< e
; r
++)
1313 /* s+1 consecutive bits to 1 (s < 63) */
1314 imm
= (1ull << (s
+ 1)) - 1;
1315 /* rotate right by r */
1317 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1318 /* replicate the constant depending on SIMD size */
1321 case 1: imm
= (imm
<< 2) | imm
;
1323 case 2: imm
= (imm
<< 4) | imm
;
1325 case 3: imm
= (imm
<< 8) | imm
;
1327 case 4: imm
= (imm
<< 16) | imm
;
1329 case 5: imm
= (imm
<< 32) | imm
;
1334 simd_immediates
[nb_imms
].imm
= imm
;
1335 simd_immediates
[nb_imms
].encoding
=
1336 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1340 assert (nb_imms
== TOTAL_IMM_NB
);
1341 qsort(simd_immediates
, nb_imms
,
1342 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1345 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1346 be accepted by logical (immediate) instructions
1347 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1349 ESIZE is the number of bytes in the decoded immediate value.
1350 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1351 VALUE will be returned in *ENCODING. */
1354 aarch64_logical_immediate_p (uint64_t value
, int esize
, aarch64_insn
*encoding
)
1356 simd_imm_encoding imm_enc
;
1357 const simd_imm_encoding
*imm_encoding
;
1358 static bool initialized
= false;
1362 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), esize: %d", value
,
1367 build_immediate_table ();
1371 /* Allow all zeros or all ones in top bits, so that
1372 constant expressions like ~1 are permitted. */
1373 upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
1374 if ((value
& ~upper
) != value
&& (value
| upper
) != value
)
1377 /* Replicate to a full 64-bit value. */
1379 for (i
= esize
* 8; i
< 64; i
*= 2)
1380 value
|= (value
<< i
);
1382 imm_enc
.imm
= value
;
1383 imm_encoding
= (const simd_imm_encoding
*)
1384 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1385 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1386 if (imm_encoding
== NULL
)
1388 DEBUG_TRACE ("exit with false");
1391 if (encoding
!= NULL
)
1392 *encoding
= imm_encoding
->encoding
;
1393 DEBUG_TRACE ("exit with true");
/* If 64-bit immediate IMM is in the format of
   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
   of value "abcdefgh".  Otherwise return -1.  */
int
aarch64_shrink_expanded_imm8 (uint64_t imm)
{
  int i, ret;
  uint32_t byte;

  ret = 0;
  for (i = 0; i < 8; i++)
    {
      byte = (imm >> (8 * i)) & 0xff;
      if (byte == 0xff)
	ret |= 1 << i;
      else if (byte != 0x00)
	/* A byte that is neither all-ones nor all-zeros: not expandable.  */
	return -1;
    }
  return ret;
}
1419 /* Utility inline functions for operand_general_constraint_met_p. */
1422 set_error (aarch64_operand_error
*mismatch_detail
,
1423 enum aarch64_operand_error_kind kind
, int idx
,
1426 if (mismatch_detail
== NULL
)
1428 mismatch_detail
->kind
= kind
;
1429 mismatch_detail
->index
= idx
;
1430 mismatch_detail
->error
= error
;
1434 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1437 if (mismatch_detail
== NULL
)
1439 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1443 set_invalid_regno_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1444 const char *prefix
, int lower_bound
, int upper_bound
)
1446 if (mismatch_detail
== NULL
)
1448 set_error (mismatch_detail
, AARCH64_OPDE_INVALID_REGNO
, idx
, NULL
);
1449 mismatch_detail
->data
[0].s
= prefix
;
1450 mismatch_detail
->data
[1].i
= lower_bound
;
1451 mismatch_detail
->data
[2].i
= upper_bound
;
1455 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1456 int idx
, int lower_bound
, int upper_bound
,
1459 if (mismatch_detail
== NULL
)
1461 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1462 mismatch_detail
->data
[0].i
= lower_bound
;
1463 mismatch_detail
->data
[1].i
= upper_bound
;
1467 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1468 int idx
, int lower_bound
, int upper_bound
)
1470 if (mismatch_detail
== NULL
)
1472 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1473 _("immediate value"));
1477 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1478 int idx
, int lower_bound
, int upper_bound
)
1480 if (mismatch_detail
== NULL
)
1482 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1483 _("immediate offset"));
1487 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1488 int idx
, int lower_bound
, int upper_bound
)
1490 if (mismatch_detail
== NULL
)
1492 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1493 _("register number"));
1497 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1498 int idx
, int lower_bound
, int upper_bound
)
1500 if (mismatch_detail
== NULL
)
1502 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1503 _("register element index"));
1507 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1508 int idx
, int lower_bound
, int upper_bound
)
1510 if (mismatch_detail
== NULL
)
1512 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1516 /* Report that the MUL modifier in operand IDX should be in the range
1517 [LOWER_BOUND, UPPER_BOUND]. */
1519 set_multiplier_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1520 int idx
, int lower_bound
, int upper_bound
)
1522 if (mismatch_detail
== NULL
)
1524 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1529 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1532 if (mismatch_detail
== NULL
)
1534 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1535 mismatch_detail
->data
[0].i
= alignment
;
1539 set_reg_list_length_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1542 if (mismatch_detail
== NULL
)
1544 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST_LENGTH
, idx
, NULL
);
1545 mismatch_detail
->data
[0].i
= 1 << expected_num
;
1549 set_reg_list_stride_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1552 if (mismatch_detail
== NULL
)
1554 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST_STRIDE
, idx
, NULL
);
1555 mismatch_detail
->data
[0].i
= 1 << expected_num
;
1559 set_invalid_vg_size (aarch64_operand_error
*mismatch_detail
,
1560 int idx
, int expected
)
1562 if (mismatch_detail
== NULL
)
1564 set_error (mismatch_detail
, AARCH64_OPDE_INVALID_VG_SIZE
, idx
, NULL
);
1565 mismatch_detail
->data
[0].i
= expected
;
1569 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1572 if (mismatch_detail
== NULL
)
1574 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1577 /* Check that indexed register operand OPND has a register in the range
1578 [MIN_REGNO, MAX_REGNO] and an index in the range [MIN_INDEX, MAX_INDEX].
1579 PREFIX is the register prefix, such as "z" for SVE vector registers. */
1582 check_reglane (const aarch64_opnd_info
*opnd
,
1583 aarch64_operand_error
*mismatch_detail
, int idx
,
1584 const char *prefix
, int min_regno
, int max_regno
,
1585 int min_index
, int max_index
)
1587 if (!value_in_range_p (opnd
->reglane
.regno
, min_regno
, max_regno
))
1589 set_invalid_regno_error (mismatch_detail
, idx
, prefix
, min_regno
,
1593 if (!value_in_range_p (opnd
->reglane
.index
, min_index
, max_index
))
1595 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, min_index
,
1602 /* Check that register list operand OPND has NUM_REGS registers and a
1603 register stride of STRIDE. */
1606 check_reglist (const aarch64_opnd_info
*opnd
,
1607 aarch64_operand_error
*mismatch_detail
, int idx
,
1608 int num_regs
, int stride
)
1610 if (opnd
->reglist
.num_regs
!= num_regs
)
1612 set_reg_list_length_error (mismatch_detail
, idx
, num_regs
);
1615 if (opnd
->reglist
.stride
!= stride
)
1617 set_reg_list_stride_error (mismatch_detail
, idx
, stride
);
1623 /* Check that indexed ZA operand OPND has:
1625 - a selection register in the range [MIN_WREG, MIN_WREG + 3]
1627 - RANGE_SIZE consecutive immediate offsets.
1629 - an initial immediate offset that is a multiple of RANGE_SIZE
1630 in the range [0, MAX_VALUE * RANGE_SIZE]
1632 - a vector group size of GROUP_SIZE.
1634 - STATUS_VG for cases where VGx2 or VGx4 is mandatory. */
1636 check_za_access (const aarch64_opnd_info
*opnd
,
1637 aarch64_operand_error
*mismatch_detail
, int idx
,
1638 int min_wreg
, int max_value
, unsigned int range_size
,
1639 int group_size
, bool status_vg
)
1641 if (!value_in_range_p (opnd
->indexed_za
.index
.regno
, min_wreg
, min_wreg
+ 3))
1644 set_other_error (mismatch_detail
, idx
,
1645 _("expected a selection register in the"
1647 else if (min_wreg
== 8)
1648 set_other_error (mismatch_detail
, idx
,
1649 _("expected a selection register in the"
1656 int max_index
= max_value
* range_size
;
1657 if (!value_in_range_p (opnd
->indexed_za
.index
.imm
, 0, max_index
))
1659 set_offset_out_of_range_error (mismatch_detail
, idx
, 0, max_index
);
1663 if ((opnd
->indexed_za
.index
.imm
% range_size
) != 0)
1665 assert (range_size
== 2 || range_size
== 4);
1666 set_other_error (mismatch_detail
, idx
,
1668 ? _("starting offset is not a multiple of 2")
1669 : _("starting offset is not a multiple of 4"));
1673 if (opnd
->indexed_za
.index
.countm1
!= range_size
- 1)
1675 if (range_size
== 1)
1676 set_other_error (mismatch_detail
, idx
,
1677 _("expected a single offset rather than"
1679 else if (range_size
== 2)
1680 set_other_error (mismatch_detail
, idx
,
1681 _("expected a range of two offsets"));
1682 else if (range_size
== 4)
1683 set_other_error (mismatch_detail
, idx
,
1684 _("expected a range of four offsets"));
1690 /* The vector group specifier is optional in assembly code. */
1691 if (opnd
->indexed_za
.group_size
!= group_size
1692 && (status_vg
|| opnd
->indexed_za
.group_size
!= 0 ))
1694 set_invalid_vg_size (mismatch_detail
, idx
, group_size
);
1701 /* Given a load/store operation, calculate the size of transferred data via a
1702 cumulative sum of qualifier sizes preceding the address operand in the
1703 OPNDS operand list argument. */
1705 calc_ldst_datasize (const aarch64_opnd_info
*opnds
)
1707 unsigned num_bytes
= 0; /* total number of bytes transferred. */
1708 enum aarch64_operand_class opnd_class
;
1709 enum aarch64_opnd type
;
1711 for (int i
= 0; i
< AARCH64_MAX_OPND_NUM
; i
++)
1713 type
= opnds
[i
].type
;
1714 opnd_class
= aarch64_operands
[type
].op_class
;
1715 if (opnd_class
== AARCH64_OPND_CLASS_ADDRESS
)
1717 num_bytes
+= aarch64_get_qualifier_esize (opnds
[i
].qualifier
);
1723 /* General constraint checking based on operand code.
1725 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1726 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1728 This function has to be called after the qualifiers for all operands
1731 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1732 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1733 of error message during the disassembling where error message is not
1734 wanted. We avoid the dynamic construction of strings of error messages
1735 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1736 use a combination of error code, static string and some integer data to
1737 represent an error. */
1740 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1741 enum aarch64_opnd type
,
1742 const aarch64_opcode
*opcode
,
1743 aarch64_operand_error
*mismatch_detail
)
1745 unsigned num
, modifiers
, shift
;
1747 int64_t imm
, min_value
, max_value
;
1748 uint64_t uvalue
, mask
;
1749 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1750 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1753 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1755 switch (aarch64_operands
[type
].op_class
)
1757 case AARCH64_OPND_CLASS_INT_REG
:
1758 /* Check for pair of xzr registers. */
1759 if (type
== AARCH64_OPND_PAIRREG_OR_XZR
1760 && opnds
[idx
- 1].reg
.regno
== 0x1f)
1762 if (opnds
[idx
].reg
.regno
!= 0x1f)
1764 set_syntax_error (mismatch_detail
, idx
- 1,
1765 _("second reg in pair should be xzr if first is"
1770 /* Check pair reg constraints for instructions taking a pair of
1771 consecutively-numbered general-purpose registers. */
1772 else if (type
== AARCH64_OPND_PAIRREG
1773 || type
== AARCH64_OPND_PAIRREG_OR_XZR
)
1775 assert (idx
== 1 || idx
== 2 || idx
== 3 || idx
== 5);
1776 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1778 set_syntax_error (mismatch_detail
, idx
- 1,
1779 _("reg pair must start from even reg"));
1782 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1784 set_syntax_error (mismatch_detail
, idx
,
1785 _("reg pair must be contiguous"));
1791 /* <Xt> may be optional in some IC and TLBI instructions. */
1792 if (type
== AARCH64_OPND_Rt_SYS
)
1794 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1795 == AARCH64_OPND_CLASS_SYSTEM
));
1796 if (opnds
[1].present
1797 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1799 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1802 if (!opnds
[1].present
1803 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1805 set_other_error (mismatch_detail
, idx
, _("missing register"));
1811 case AARCH64_OPND_QLF_WSP
:
1812 case AARCH64_OPND_QLF_SP
:
1813 if (!aarch64_stack_pointer_p (opnd
))
1815 set_other_error (mismatch_detail
, idx
,
1816 _("stack pointer register expected"));
1825 case AARCH64_OPND_CLASS_SVE_REG
:
1828 case AARCH64_OPND_SVE_Zm3_INDEX
:
1829 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
1830 case AARCH64_OPND_SVE_Zm3_19_INDEX
:
1831 case AARCH64_OPND_SVE_Zm3_11_INDEX
:
1832 case AARCH64_OPND_SVE_Zm3_10_INDEX
:
1833 case AARCH64_OPND_SVE_Zm4_11_INDEX
:
1834 case AARCH64_OPND_SVE_Zm4_INDEX
:
1835 size
= get_operand_fields_width (get_operand_from_code (type
));
1836 shift
= get_operand_specific_data (&aarch64_operands
[type
]);
1837 if (!check_reglane (opnd
, mismatch_detail
, idx
,
1838 "z", 0, (1 << shift
) - 1,
1839 0, (1u << (size
- shift
)) - 1))
1843 case AARCH64_OPND_SVE_Zm1_23_INDEX
:
1844 size
= get_operand_fields_width (get_operand_from_code (type
));
1845 if (!check_reglane (opnd
, mismatch_detail
, idx
, "z", 0, 31, 0, 1))
1849 case AARCH64_OPND_SVE_Zm2_22_INDEX
:
1850 size
= get_operand_fields_width (get_operand_from_code (type
));
1851 if (!check_reglane (opnd
, mismatch_detail
, idx
, "z", 0, 31, 0, 3))
1855 case AARCH64_OPND_SVE_Zn_INDEX
:
1856 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1857 if (!check_reglane (opnd
, mismatch_detail
, idx
, "z", 0, 31,
1862 case AARCH64_OPND_SVE_Zn_5_INDEX
:
1863 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1864 if (!check_reglane (opnd
, mismatch_detail
, idx
, "z", 0, 31,
1869 case AARCH64_OPND_SME_PNn3_INDEX1
:
1870 case AARCH64_OPND_SME_PNn3_INDEX2
:
1871 size
= get_operand_field_width (get_operand_from_code (type
), 1);
1872 if (!check_reglane (opnd
, mismatch_detail
, idx
, "pn", 8, 15,
1873 0, (1 << size
) - 1))
1877 case AARCH64_OPND_SVE_Zm3_12_INDEX
:
1878 case AARCH64_OPND_SME_Zn_INDEX1_16
:
1879 case AARCH64_OPND_SME_Zn_INDEX2_15
:
1880 case AARCH64_OPND_SME_Zn_INDEX2_16
:
1881 case AARCH64_OPND_SME_Zn_INDEX3_14
:
1882 case AARCH64_OPND_SME_Zn_INDEX3_15
:
1883 case AARCH64_OPND_SME_Zn_INDEX4_14
:
1884 case AARCH64_OPND_SVE_Zn0_INDEX
:
1885 case AARCH64_OPND_SVE_Zn1_17_INDEX
:
1886 case AARCH64_OPND_SVE_Zn2_18_INDEX
:
1887 case AARCH64_OPND_SVE_Zn3_22_INDEX
:
1888 case AARCH64_OPND_SVE_Zd0_INDEX
:
1889 case AARCH64_OPND_SVE_Zd1_17_INDEX
:
1890 case AARCH64_OPND_SVE_Zd2_18_INDEX
:
1891 case AARCH64_OPND_SVE_Zd3_22_INDEX
:
1892 size
= get_operand_fields_width (get_operand_from_code (type
)) - 5;
1893 if (!check_reglane (opnd
, mismatch_detail
, idx
, "z", 0, 31,
1894 0, (1 << size
) - 1))
1898 case AARCH64_OPND_SME_Zm_INDEX1
:
1899 case AARCH64_OPND_SME_Zm_INDEX2
:
1900 case AARCH64_OPND_SME_Zm_INDEX2_3
:
1901 case AARCH64_OPND_SME_Zm_INDEX3_1
:
1902 case AARCH64_OPND_SME_Zm_INDEX3_2
:
1903 case AARCH64_OPND_SME_Zm_INDEX3_3
:
1904 case AARCH64_OPND_SME_Zm_INDEX3_10
:
1905 case AARCH64_OPND_SME_Zm_INDEX4_1
:
1906 case AARCH64_OPND_SME_Zm_INDEX4_2
:
1907 case AARCH64_OPND_SME_Zm_INDEX4_3
:
1908 case AARCH64_OPND_SME_Zm_INDEX4_10
:
1909 size
= get_operand_fields_width (get_operand_from_code (type
)) - 4;
1910 if (!check_reglane (opnd
, mismatch_detail
, idx
, "z", 0, 15,
1911 0, (1 << size
) - 1))
1915 case AARCH64_OPND_SME_Zm
:
1916 if (opnd
->reg
.regno
> 15)
1918 set_invalid_regno_error (mismatch_detail
, idx
, "z", 0, 15);
1923 case AARCH64_OPND_SME_PnT_Wm_imm
:
1924 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1925 max_value
= 16 / size
- 1;
1926 if (!check_za_access (opnd
, mismatch_detail
, idx
,
1927 12, max_value
, 1, 0, get_opcode_dependent_value (opcode
)))
1936 case AARCH64_OPND_CLASS_SVE_REGLIST
:
1939 case AARCH64_OPND_SME_Pdx2
:
1940 case AARCH64_OPND_SME_Zdnx2
:
1941 case AARCH64_OPND_SME_Zdnx4
:
1942 case AARCH64_OPND_SME_Zmx2
:
1943 case AARCH64_OPND_SME_Zmx4
:
1944 case AARCH64_OPND_SME_Znx2
:
1945 case AARCH64_OPND_SME_Znx2_BIT_INDEX
:
1946 case AARCH64_OPND_SME_Znx4
:
1947 num
= get_operand_specific_data (&aarch64_operands
[type
]);
1948 if (!check_reglist (opnd
, mismatch_detail
, idx
, num
, 1))
1950 if ((opnd
->reglist
.first_regno
% num
) != 0)
1952 set_other_error (mismatch_detail
, idx
,
1953 _("start register out of range"));
1958 case AARCH64_OPND_SME_Zdnx4_STRIDED
:
1959 case AARCH64_OPND_SME_Ztx2_STRIDED
:
1960 case AARCH64_OPND_SME_Ztx4_STRIDED
:
1961 /* 2-register lists have a stride of 8 and 4-register lists
1962 have a stride of 4. */
1963 num
= get_operand_specific_data (&aarch64_operands
[type
]);
1964 if (!check_reglist (opnd
, mismatch_detail
, idx
, num
, 16 / num
))
1966 num
= 16 | (opnd
->reglist
.stride
- 1);
1967 if ((opnd
->reglist
.first_regno
& ~num
) != 0)
1969 set_other_error (mismatch_detail
, idx
,
1970 _("start register out of range"));
1975 case AARCH64_OPND_SME_PdxN
:
1976 case AARCH64_OPND_SVE_ZnxN
:
1977 case AARCH64_OPND_SVE_ZtxN
:
1978 num
= get_opcode_dependent_value (opcode
);
1979 if (!check_reglist (opnd
, mismatch_detail
, idx
, num
, 1))
1988 case AARCH64_OPND_CLASS_ZA_ACCESS
:
1991 case AARCH64_OPND_SME_ZA_HV_idx_src
:
1992 case AARCH64_OPND_SME_ZA_HV_idx_dest
:
1993 case AARCH64_OPND_SME_ZA_HV_idx_ldstr
:
1994 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1995 max_value
= 16 / size
- 1;
1996 if (!check_za_access (opnd
, mismatch_detail
, idx
, 12, max_value
, 1,
1997 get_opcode_dependent_value (opcode
),
1998 get_opcode_dependent_vg_status (opcode
)))
2002 case AARCH64_OPND_SME_ZA_array_off4
:
2003 if (!check_za_access (opnd
, mismatch_detail
, idx
, 12, 15, 1,
2004 get_opcode_dependent_value (opcode
),
2005 get_opcode_dependent_vg_status (opcode
)))
2009 case AARCH64_OPND_SME_ZA_array_off3_0
:
2010 case AARCH64_OPND_SME_ZA_array_off3_5
:
2011 if (!check_za_access (opnd
, mismatch_detail
, idx
, 8, 7, 1,
2012 get_opcode_dependent_value (opcode
),
2013 get_opcode_dependent_vg_status (opcode
)))
2017 case AARCH64_OPND_SME_ZA_array_off1x4
:
2018 if (!check_za_access (opnd
, mismatch_detail
, idx
, 8, 1, 4,
2019 get_opcode_dependent_value (opcode
),
2020 get_opcode_dependent_vg_status (opcode
)))
2024 case AARCH64_OPND_SME_ZA_array_off2x2
:
2025 if (!check_za_access (opnd
, mismatch_detail
, idx
, 8, 3, 2,
2026 get_opcode_dependent_value (opcode
),
2027 get_opcode_dependent_vg_status (opcode
)))
2031 case AARCH64_OPND_SME_ZA_array_off2x4
:
2032 if (!check_za_access (opnd
, mismatch_detail
, idx
, 8, 3, 4,
2033 get_opcode_dependent_value (opcode
),
2034 get_opcode_dependent_vg_status (opcode
)))
2038 case AARCH64_OPND_SME_ZA_array_off3x2
:
2039 if (!check_za_access (opnd
, mismatch_detail
, idx
, 8, 7, 2,
2040 get_opcode_dependent_value (opcode
),
2041 get_opcode_dependent_vg_status (opcode
)))
2045 case AARCH64_OPND_SME_ZA_array_vrsb_1
:
2046 if (!check_za_access (opnd
, mismatch_detail
, idx
, 12, 7, 2,
2047 get_opcode_dependent_value (opcode
),
2048 get_opcode_dependent_vg_status (opcode
)))
2052 case AARCH64_OPND_SME_ZA_array_vrsh_1
:
2053 if (!check_za_access (opnd
, mismatch_detail
, idx
, 12, 3, 2,
2054 get_opcode_dependent_value (opcode
),
2055 get_opcode_dependent_vg_status (opcode
)))
2059 case AARCH64_OPND_SME_ZA_array_vrss_1
:
2060 if (!check_za_access (opnd
, mismatch_detail
, idx
, 12, 1, 2,
2061 get_opcode_dependent_value (opcode
),
2062 get_opcode_dependent_vg_status (opcode
)))
2066 case AARCH64_OPND_SME_ZA_array_vrsd_1
:
2067 if (!check_za_access (opnd
, mismatch_detail
, idx
, 12, 0, 2,
2068 get_opcode_dependent_value (opcode
),
2069 get_opcode_dependent_vg_status (opcode
)))
2073 case AARCH64_OPND_SME_ZA_array_vrsb_2
:
2074 if (!check_za_access (opnd
, mismatch_detail
, idx
, 12, 3, 4,
2075 get_opcode_dependent_value (opcode
),
2076 get_opcode_dependent_vg_status (opcode
)))
2080 case AARCH64_OPND_SME_ZA_array_vrsh_2
:
2081 if (!check_za_access (opnd
, mismatch_detail
, idx
, 12, 1, 4,
2082 get_opcode_dependent_value (opcode
),
2083 get_opcode_dependent_vg_status (opcode
)))
2087 case AARCH64_OPND_SME_ZA_ARRAY4
:
2088 if (!check_za_access (opnd
, mismatch_detail
, idx
, 12, 15, 1,
2089 get_opcode_dependent_value (opcode
),
2090 get_opcode_dependent_vg_status (opcode
)))
2094 case AARCH64_OPND_SME_ZA_array_vrss_2
:
2095 case AARCH64_OPND_SME_ZA_array_vrsd_2
:
2096 if (!check_za_access (opnd
, mismatch_detail
, idx
, 12, 0, 4,
2097 get_opcode_dependent_value (opcode
),
2098 get_opcode_dependent_vg_status (opcode
)))
2102 case AARCH64_OPND_SME_ZA_HV_idx_srcxN
:
2103 case AARCH64_OPND_SME_ZA_HV_idx_destxN
:
2104 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
2105 num
= get_opcode_dependent_value (opcode
);
2106 max_value
= 16 / num
/ size
;
2109 if (!check_za_access (opnd
, mismatch_detail
, idx
, 12, max_value
, num
,
2110 0, get_opcode_dependent_value (opcode
)))
2119 case AARCH64_OPND_CLASS_PRED_REG
:
2122 case AARCH64_OPND_SME_PNd3
:
2123 case AARCH64_OPND_SME_PNg3
:
2124 if (opnd
->reg
.regno
< 8)
2126 set_invalid_regno_error (mismatch_detail
, idx
, "pn", 8, 15);
2132 if (opnd
->reg
.regno
>= 8
2133 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
2135 set_invalid_regno_error (mismatch_detail
, idx
, "p", 0, 7);
2142 case AARCH64_OPND_CLASS_COND
:
2143 if (type
== AARCH64_OPND_COND1
2144 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
2146 /* Not allow AL or NV. */
2147 set_syntax_error (mismatch_detail
, idx
, NULL
);
2151 case AARCH64_OPND_CLASS_ADDRESS
:
2152 /* Check writeback. */
2153 switch (opcode
->iclass
)
2157 case ldstnapair_offs
:
2160 if (opnd
->addr
.writeback
== 1)
2162 set_syntax_error (mismatch_detail
, idx
,
2163 _("unexpected address writeback"));
2168 if (opnd
->addr
.writeback
== 1 && opnd
->addr
.preind
!= 1)
2170 set_syntax_error (mismatch_detail
, idx
,
2171 _("unexpected address writeback"));
2176 case ldstpair_indexed
:
2179 if (opnd
->addr
.writeback
== 0)
2181 set_syntax_error (mismatch_detail
, idx
,
2182 _("address writeback expected"));
2187 if (opnd
->addr
.writeback
)
2188 if ((type
== AARCH64_OPND_RCPC3_ADDR_PREIND_WB
2189 && !opnd
->addr
.preind
)
2190 || (type
== AARCH64_OPND_RCPC3_ADDR_POSTIND
2191 && !opnd
->addr
.postind
))
2193 set_syntax_error (mismatch_detail
, idx
,
2194 _("unexpected address writeback"));
2200 assert (opnd
->addr
.writeback
== 0);
2205 case AARCH64_OPND_ADDR_SIMM7
:
2206 /* Scaled signed 7 bits immediate offset. */
2207 /* Get the size of the data element that is accessed, which may be
2208 different from that of the source register size,
2209 e.g. in strb/ldrb. */
2210 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
2211 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
2213 set_offset_out_of_range_error (mismatch_detail
, idx
,
2214 -64 * size
, 63 * size
);
2217 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
2219 set_unaligned_error (mismatch_detail
, idx
, size
);
2223 case AARCH64_OPND_ADDR_OFFSET
:
2224 case AARCH64_OPND_ADDR_SIMM9
:
2225 /* Unscaled signed 9 bits immediate offset. */
2226 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
2228 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
2233 case AARCH64_OPND_ADDR_SIMM9_2
:
2234 /* Unscaled signed 9 bits immediate offset, which has to be negative
2236 size
= aarch64_get_qualifier_esize (qualifier
);
2237 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
2238 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
2239 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
2241 set_other_error (mismatch_detail
, idx
,
2242 _("negative or unaligned offset expected"));
2245 case AARCH64_OPND_ADDR_SIMM10
:
2246 /* Scaled signed 10 bits immediate offset. */
2247 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4088))
2249 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4088);
2252 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 8))
2254 set_unaligned_error (mismatch_detail
, idx
, 8);
2259 case AARCH64_OPND_ADDR_SIMM11
:
2260 /* Signed 11 bits immediate offset (multiple of 16). */
2261 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -1024, 1008))
2263 set_offset_out_of_range_error (mismatch_detail
, idx
, -1024, 1008);
2267 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 16))
2269 set_unaligned_error (mismatch_detail
, idx
, 16);
2274 case AARCH64_OPND_ADDR_SIMM13
:
2275 /* Signed 13 bits immediate offset (multiple of 16). */
2276 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4080))
2278 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4080);
2282 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 16))
2284 set_unaligned_error (mismatch_detail
, idx
, 16);
2289 case AARCH64_OPND_SIMD_ADDR_POST
:
2290 /* AdvSIMD load/store multiple structures, post-index. */
2292 if (opnd
->addr
.offset
.is_reg
)
2294 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
2298 set_other_error (mismatch_detail
, idx
,
2299 _("invalid register offset"));
2305 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
2306 unsigned num_bytes
; /* total number of bytes transferred. */
2307 /* The opcode dependent area stores the number of elements in
2308 each structure to be loaded/stored. */
2309 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
2310 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
2311 /* Special handling of loading single structure to all lane. */
2312 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
2313 * aarch64_get_qualifier_esize (prev
->qualifier
);
2315 num_bytes
= prev
->reglist
.num_regs
2316 * aarch64_get_qualifier_esize (prev
->qualifier
)
2317 * aarch64_get_qualifier_nelem (prev
->qualifier
);
2318 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
2320 set_other_error (mismatch_detail
, idx
,
2321 _("invalid post-increment amount"));
2327 case AARCH64_OPND_ADDR_REGOFF
:
2328 /* Get the size of the data element that is accessed, which may be
2329 different from that of the source register size,
2330 e.g. in strb/ldrb. */
2331 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
2332 /* It is either no shift or shift by the binary logarithm of SIZE. */
2333 if (opnd
->shifter
.amount
!= 0
2334 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
2336 set_other_error (mismatch_detail
, idx
,
2337 _("invalid shift amount"));
2340 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
2342 switch (opnd
->shifter
.kind
)
2344 case AARCH64_MOD_UXTW
:
2345 case AARCH64_MOD_LSL
:
2346 case AARCH64_MOD_SXTW
:
2347 case AARCH64_MOD_SXTX
: break;
2349 set_other_error (mismatch_detail
, idx
,
2350 _("invalid extend/shift operator"));
2355 case AARCH64_OPND_ADDR_UIMM12
:
2356 imm
= opnd
->addr
.offset
.imm
;
2357 /* Get the size of the data element that is accessed, which may be
2358 different from that of the source register size,
2359 e.g. in strb/ldrb. */
2360 size
= aarch64_get_qualifier_esize (qualifier
);
2361 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
2363 set_offset_out_of_range_error (mismatch_detail
, idx
,
2367 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
2369 set_unaligned_error (mismatch_detail
, idx
, size
);
2374 case AARCH64_OPND_ADDR_PCREL14
:
2375 case AARCH64_OPND_ADDR_PCREL19
:
2376 case AARCH64_OPND_ADDR_PCREL21
:
2377 case AARCH64_OPND_ADDR_PCREL26
:
2378 imm
= opnd
->imm
.value
;
2379 if (operand_need_shift_by_two (get_operand_from_code (type
)))
2381 /* The offset value in a PC-relative branch instruction is alway
2382 4-byte aligned and is encoded without the lowest 2 bits. */
2383 if (!value_aligned_p (imm
, 4))
2385 set_unaligned_error (mismatch_detail
, idx
, 4);
2388 /* Right shift by 2 so that we can carry out the following check
2392 size
= get_operand_fields_width (get_operand_from_code (type
));
2393 if (!value_fit_signed_field_p (imm
, size
))
2395 set_other_error (mismatch_detail
, idx
,
2396 _("immediate out of range"));
2401 case AARCH64_OPND_SME_ADDR_RI_U4xVL
:
2402 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 15))
2404 set_offset_out_of_range_error (mismatch_detail
, idx
, 0, 15);
2409 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
2410 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
2411 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
2412 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
2416 assert (!opnd
->addr
.offset
.is_reg
);
2417 assert (opnd
->addr
.preind
);
2418 num
= 1 + get_operand_specific_data (&aarch64_operands
[type
]);
2421 if ((opnd
->addr
.offset
.imm
!= 0 && !opnd
->shifter
.operator_present
)
2422 || (opnd
->shifter
.operator_present
2423 && opnd
->shifter
.kind
!= AARCH64_MOD_MUL_VL
))
2425 set_other_error (mismatch_detail
, idx
,
2426 _("invalid addressing mode"));
2429 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
2431 set_offset_out_of_range_error (mismatch_detail
, idx
,
2432 min_value
, max_value
);
2435 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
2437 set_unaligned_error (mismatch_detail
, idx
, num
);
2442 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
2445 goto sve_imm_offset_vl
;
2447 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
2450 goto sve_imm_offset_vl
;
2452 case AARCH64_OPND_SVE_ADDR_RI_U6
:
2453 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
2454 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
2455 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
2459 assert (!opnd
->addr
.offset
.is_reg
);
2460 assert (opnd
->addr
.preind
);
2461 num
= 1 << get_operand_specific_data (&aarch64_operands
[type
]);
2464 if (opnd
->shifter
.operator_present
2465 || opnd
->shifter
.amount_present
)
2467 set_other_error (mismatch_detail
, idx
,
2468 _("invalid addressing mode"));
2471 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
2473 set_offset_out_of_range_error (mismatch_detail
, idx
,
2474 min_value
, max_value
);
2477 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
2479 set_unaligned_error (mismatch_detail
, idx
, num
);
2484 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
2485 case AARCH64_OPND_SVE_ADDR_RI_S4x32
:
2488 goto sve_imm_offset
;
2490 case AARCH64_OPND_SVE_ADDR_ZX
:
2491 /* Everything is already ensured by parse_operands or
2492 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
2494 assert (opnd
->addr
.offset
.is_reg
);
2495 assert (opnd
->addr
.preind
);
2496 assert ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) == 0);
2497 assert (opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
2498 assert (opnd
->shifter
.operator_present
== 0);
2501 case AARCH64_OPND_SVE_ADDR_R
:
2502 case AARCH64_OPND_SVE_ADDR_RR
:
2503 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
2504 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
2505 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
2506 case AARCH64_OPND_SVE_ADDR_RR_LSL4
:
2507 case AARCH64_OPND_SVE_ADDR_RX
:
2508 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
2509 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
2510 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
2511 case AARCH64_OPND_SVE_ADDR_RX_LSL4
:
2512 case AARCH64_OPND_SVE_ADDR_RZ
:
2513 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
2514 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
2515 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
2516 modifiers
= 1 << AARCH64_MOD_LSL
;
2518 assert (opnd
->addr
.offset
.is_reg
);
2519 assert (opnd
->addr
.preind
);
2520 if ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) != 0
2521 && opnd
->addr
.offset
.regno
== 31)
2523 set_other_error (mismatch_detail
, idx
,
2524 _("index register xzr is not allowed"));
2527 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
2528 || (opnd
->shifter
.amount
2529 != get_operand_specific_data (&aarch64_operands
[type
])))
2531 set_other_error (mismatch_detail
, idx
,
2532 _("invalid addressing mode"));
2537 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
2538 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
2539 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
2540 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
2541 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
2542 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
2543 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
2544 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
2545 modifiers
= (1 << AARCH64_MOD_SXTW
) | (1 << AARCH64_MOD_UXTW
);
2546 goto sve_rr_operand
;
2548 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
2549 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
2550 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
2551 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
2554 goto sve_imm_offset
;
2556 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
2557 modifiers
= 1 << AARCH64_MOD_LSL
;
2559 assert (opnd
->addr
.offset
.is_reg
);
2560 assert (opnd
->addr
.preind
);
2561 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
2562 || opnd
->shifter
.amount
< 0
2563 || opnd
->shifter
.amount
> 3)
2565 set_other_error (mismatch_detail
, idx
,
2566 _("invalid addressing mode"));
2571 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
2572 modifiers
= (1 << AARCH64_MOD_SXTW
);
2573 goto sve_zz_operand
;
2575 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
2576 modifiers
= 1 << AARCH64_MOD_UXTW
;
2577 goto sve_zz_operand
;
2579 case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB
:
2580 case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND
:
2581 case AARCH64_OPND_RCPC3_ADDR_PREIND_WB
:
2582 case AARCH64_OPND_RCPC3_ADDR_POSTIND
:
2584 int num_bytes
= calc_ldst_datasize (opnds
);
2585 int abs_offset
= (type
== AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB
2586 || type
== AARCH64_OPND_RCPC3_ADDR_PREIND_WB
)
2587 ? opnd
->addr
.offset
.imm
* -1
2588 : opnd
->addr
.offset
.imm
;
2589 if ((int) num_bytes
!= abs_offset
2590 && opnd
->addr
.offset
.imm
!= 0)
2592 set_other_error (mismatch_detail
, idx
,
2593 _("invalid increment amount"));
2599 case AARCH64_OPND_RCPC3_ADDR_OFFSET
:
2600 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
2602 set_imm_out_of_range_error (mismatch_detail
, idx
, -256, 255);
2611 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
2612 if (type
== AARCH64_OPND_LEt
)
2614 /* Get the upper bound for the element index. */
2615 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
2616 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
2618 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2622 /* The opcode dependent area stores the number of elements in
2623 each structure to be loaded/stored. */
2624 num
= get_opcode_dependent_value (opcode
);
2627 case AARCH64_OPND_LVn_LUT
:
2628 if (!check_reglist (opnd
, mismatch_detail
, idx
, num
, 1))
2631 case AARCH64_OPND_LVt
:
2632 assert (num
>= 1 && num
<= 4);
2633 /* Unless LD1/ST1, the number of registers should be equal to that
2634 of the structure elements. */
2635 if (num
!= 1 && !check_reglist (opnd
, mismatch_detail
, idx
, num
, 1))
2638 case AARCH64_OPND_LVt_AL
:
2639 case AARCH64_OPND_LEt
:
2640 assert (num
>= 1 && num
<= 4);
2641 /* The number of registers should be equal to that of the structure
2643 if (!check_reglist (opnd
, mismatch_detail
, idx
, num
, 1))
2649 if (opnd
->reglist
.stride
!= 1)
2651 set_reg_list_stride_error (mismatch_detail
, idx
, 1);
2656 case AARCH64_OPND_CLASS_IMMEDIATE
:
2657 /* Constraint check on immediate operand. */
2658 imm
= opnd
->imm
.value
;
2659 /* E.g. imm_0_31 constrains value to be 0..31. */
2660 if (qualifier_value_in_range_constraint_p (qualifier
)
2661 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
2662 get_upper_bound (qualifier
)))
2664 set_imm_out_of_range_error (mismatch_detail
, idx
,
2665 get_lower_bound (qualifier
),
2666 get_upper_bound (qualifier
));
2672 case AARCH64_OPND_AIMM
:
2673 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2675 set_other_error (mismatch_detail
, idx
,
2676 _("invalid shift operator"));
2679 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
2681 set_other_error (mismatch_detail
, idx
,
2682 _("shift amount must be 0 or 12"));
2685 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
2687 set_other_error (mismatch_detail
, idx
,
2688 _("immediate out of range"));
2693 case AARCH64_OPND_HALF
:
2694 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
2695 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2697 set_other_error (mismatch_detail
, idx
,
2698 _("invalid shift operator"));
2701 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2702 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
2704 set_other_error (mismatch_detail
, idx
,
2705 _("shift amount must be a multiple of 16"));
2708 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
2710 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
2714 if (opnd
->imm
.value
< 0)
2716 set_other_error (mismatch_detail
, idx
,
2717 _("negative immediate value not allowed"));
2720 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
2722 set_other_error (mismatch_detail
, idx
,
2723 _("immediate out of range"));
2728 case AARCH64_OPND_IMM_MOV
:
2730 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2731 imm
= opnd
->imm
.value
;
2735 case OP_MOV_IMM_WIDEN
:
2738 case OP_MOV_IMM_WIDE
:
2739 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
2741 set_other_error (mismatch_detail
, idx
,
2742 _("immediate out of range"));
2746 case OP_MOV_IMM_LOG
:
2747 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
2749 set_other_error (mismatch_detail
, idx
,
2750 _("immediate out of range"));
2761 case AARCH64_OPND_NZCV
:
2762 case AARCH64_OPND_CCMP_IMM
:
2763 case AARCH64_OPND_EXCEPTION
:
2764 case AARCH64_OPND_UNDEFINED
:
2765 case AARCH64_OPND_TME_UIMM16
:
2766 case AARCH64_OPND_UIMM4
:
2767 case AARCH64_OPND_UIMM4_ADDG
:
2768 case AARCH64_OPND_UIMM7
:
2769 case AARCH64_OPND_UIMM3_OP1
:
2770 case AARCH64_OPND_UIMM3_OP2
:
2771 case AARCH64_OPND_SVE_UIMM3
:
2772 case AARCH64_OPND_SVE_UIMM7
:
2773 case AARCH64_OPND_SVE_UIMM8
:
2774 case AARCH64_OPND_SVE_UIMM4
:
2775 case AARCH64_OPND_SVE_UIMM8_53
:
2776 case AARCH64_OPND_CSSC_UIMM8
:
2777 size
= get_operand_fields_width (get_operand_from_code (type
));
2779 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, size
))
2781 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2787 case AARCH64_OPND_UIMM10
:
2788 /* Scaled unsigned 10 bits immediate offset. */
2789 if (!value_in_range_p (opnd
->imm
.value
, 0, 1008))
2791 set_imm_out_of_range_error (mismatch_detail
, idx
, 0, 1008);
2795 if (!value_aligned_p (opnd
->imm
.value
, 16))
2797 set_unaligned_error (mismatch_detail
, idx
, 16);
2802 case AARCH64_OPND_SIMM5
:
2803 case AARCH64_OPND_SVE_SIMM5
:
2804 case AARCH64_OPND_SVE_SIMM5B
:
2805 case AARCH64_OPND_SVE_SIMM6
:
2806 case AARCH64_OPND_SVE_SIMM8
:
2807 case AARCH64_OPND_CSSC_SIMM8
:
2808 size
= get_operand_fields_width (get_operand_from_code (type
));
2810 if (!value_fit_signed_field_p (opnd
->imm
.value
, size
))
2812 set_imm_out_of_range_error (mismatch_detail
, idx
,
2814 (1 << (size
- 1)) - 1);
2819 case AARCH64_OPND_WIDTH
:
2820 assert (idx
> 1 && opnds
[idx
-1].type
== AARCH64_OPND_IMM
2821 && opnds
[0].type
== AARCH64_OPND_Rd
);
2822 size
= get_upper_bound (qualifier
);
2823 if (opnd
->imm
.value
+ opnds
[idx
-1].imm
.value
> size
)
2824 /* lsb+width <= reg.size */
2826 set_imm_out_of_range_error (mismatch_detail
, idx
, 1,
2827 size
- opnds
[idx
-1].imm
.value
);
2832 case AARCH64_OPND_LIMM
:
2833 case AARCH64_OPND_SVE_LIMM
:
2835 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2836 uint64_t uimm
= opnd
->imm
.value
;
2837 if (opcode
->op
== OP_BIC
)
2839 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
2841 set_other_error (mismatch_detail
, idx
,
2842 _("immediate out of range"));
2848 case AARCH64_OPND_IMM0
:
2849 case AARCH64_OPND_FPIMM0
:
2850 if (opnd
->imm
.value
!= 0)
2852 set_other_error (mismatch_detail
, idx
,
2853 _("immediate zero expected"));
2858 case AARCH64_OPND_IMM_ROT1
:
2859 case AARCH64_OPND_IMM_ROT2
:
2860 case AARCH64_OPND_SVE_IMM_ROT2
:
2861 if (opnd
->imm
.value
!= 0
2862 && opnd
->imm
.value
!= 90
2863 && opnd
->imm
.value
!= 180
2864 && opnd
->imm
.value
!= 270)
2866 set_other_error (mismatch_detail
, idx
,
2867 _("rotate expected to be 0, 90, 180 or 270"));
2872 case AARCH64_OPND_IMM_ROT3
:
2873 case AARCH64_OPND_SVE_IMM_ROT1
:
2874 case AARCH64_OPND_SVE_IMM_ROT3
:
2875 if (opnd
->imm
.value
!= 90 && opnd
->imm
.value
!= 270)
2877 set_other_error (mismatch_detail
, idx
,
2878 _("rotate expected to be 90 or 270"));
2883 case AARCH64_OPND_SHLL_IMM
:
2885 size
= 8 * aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
2886 if (opnd
->imm
.value
!= size
)
2888 set_other_error (mismatch_detail
, idx
,
2889 _("invalid shift amount"));
2894 case AARCH64_OPND_IMM_VLSL
:
2895 size
= aarch64_get_qualifier_esize (qualifier
);
2896 if (!value_in_range_p (opnd
->imm
.value
, 0, size
* 8 - 1))
2898 set_imm_out_of_range_error (mismatch_detail
, idx
, 0,
2904 case AARCH64_OPND_IMM_VLSR
:
2905 size
= aarch64_get_qualifier_esize (qualifier
);
2906 if (!value_in_range_p (opnd
->imm
.value
, 1, size
* 8))
2908 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
* 8);
2913 case AARCH64_OPND_SIMD_IMM
:
2914 case AARCH64_OPND_SIMD_IMM_SFT
:
2915 /* Qualifier check. */
2918 case AARCH64_OPND_QLF_LSL
:
2919 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2921 set_other_error (mismatch_detail
, idx
,
2922 _("invalid shift operator"));
2926 case AARCH64_OPND_QLF_MSL
:
2927 if (opnd
->shifter
.kind
!= AARCH64_MOD_MSL
)
2929 set_other_error (mismatch_detail
, idx
,
2930 _("invalid shift operator"));
2934 case AARCH64_OPND_QLF_NIL
:
2935 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2937 set_other_error (mismatch_detail
, idx
,
2938 _("shift is not permitted"));
2946 /* Is the immediate valid? */
2948 if (aarch64_get_qualifier_esize (opnds
[0].qualifier
) != 8)
2950 /* uimm8 or simm8 */
2951 if (!value_in_range_p (opnd
->imm
.value
, -128, 255))
2953 set_imm_out_of_range_error (mismatch_detail
, idx
, -128, 255);
2957 else if (aarch64_shrink_expanded_imm8 (opnd
->imm
.value
) < 0)
2960 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2961 ffffffffgggggggghhhhhhhh'. */
2962 set_other_error (mismatch_detail
, idx
,
2963 _("invalid value for immediate"));
2966 /* Is the shift amount valid? */
2967 switch (opnd
->shifter
.kind
)
2969 case AARCH64_MOD_LSL
:
2970 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2971 if (!value_in_range_p (opnd
->shifter
.amount
, 0, (size
- 1) * 8))
2973 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0,
2977 if (!value_aligned_p (opnd
->shifter
.amount
, 8))
2979 set_unaligned_error (mismatch_detail
, idx
, 8);
2983 case AARCH64_MOD_MSL
:
2984 /* Only 8 and 16 are valid shift amount. */
2985 if (opnd
->shifter
.amount
!= 8 && opnd
->shifter
.amount
!= 16)
2987 set_other_error (mismatch_detail
, idx
,
2988 _("shift amount must be 0 or 16"));
2993 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
2995 set_other_error (mismatch_detail
, idx
,
2996 _("invalid shift operator"));
3003 case AARCH64_OPND_FPIMM
:
3004 case AARCH64_OPND_SIMD_FPIMM
:
3005 case AARCH64_OPND_SVE_FPIMM8
:
3006 if (opnd
->imm
.is_fp
== 0)
3008 set_other_error (mismatch_detail
, idx
,
3009 _("floating-point immediate expected"));
3012 /* The value is expected to be an 8-bit floating-point constant with
3013 sign, 3-bit exponent and normalized 4 bits of precision, encoded
3014 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
3016 if (!value_in_range_p (opnd
->imm
.value
, 0, 255))
3018 set_other_error (mismatch_detail
, idx
,
3019 _("immediate out of range"));
3022 if (opnd
->shifter
.kind
!= AARCH64_MOD_NONE
)
3024 set_other_error (mismatch_detail
, idx
,
3025 _("invalid shift operator"));
3030 case AARCH64_OPND_SVE_AIMM
:
3033 assert (opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
3034 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
3035 mask
= ~((uint64_t) -1 << (size
* 4) << (size
* 4));
3036 uvalue
= opnd
->imm
.value
;
3037 shift
= opnd
->shifter
.amount
;
3042 set_other_error (mismatch_detail
, idx
,
3043 _("no shift amount allowed for"
3044 " 8-bit constants"));
3050 if (shift
!= 0 && shift
!= 8)
3052 set_other_error (mismatch_detail
, idx
,
3053 _("shift amount must be 0 or 8"));
3056 if (shift
== 0 && (uvalue
& 0xff) == 0)
3059 uvalue
= (int64_t) uvalue
/ 256;
3063 if ((uvalue
& mask
) != uvalue
&& (uvalue
| ~mask
) != uvalue
)
3065 set_other_error (mismatch_detail
, idx
,
3066 _("immediate too big for element size"));
3069 uvalue
= (uvalue
- min_value
) & mask
;
3072 set_other_error (mismatch_detail
, idx
,
3073 _("invalid arithmetic immediate"));
3078 case AARCH64_OPND_SVE_ASIMM
:
3082 case AARCH64_OPND_SVE_I1_HALF_ONE
:
3083 assert (opnd
->imm
.is_fp
);
3084 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x3f800000)
3086 set_other_error (mismatch_detail
, idx
,
3087 _("floating-point value must be 0.5 or 1.0"));
3092 case AARCH64_OPND_SVE_I1_HALF_TWO
:
3093 assert (opnd
->imm
.is_fp
);
3094 if (opnd
->imm
.value
!= 0x3f000000 && opnd
->imm
.value
!= 0x40000000)
3096 set_other_error (mismatch_detail
, idx
,
3097 _("floating-point value must be 0.5 or 2.0"));
3102 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
3103 assert (opnd
->imm
.is_fp
);
3104 if (opnd
->imm
.value
!= 0 && opnd
->imm
.value
!= 0x3f800000)
3106 set_other_error (mismatch_detail
, idx
,
3107 _("floating-point value must be 0.0 or 1.0"));
3112 case AARCH64_OPND_SVE_INV_LIMM
:
3114 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
3115 uint64_t uimm
= ~opnd
->imm
.value
;
3116 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
3118 set_other_error (mismatch_detail
, idx
,
3119 _("immediate out of range"));
3125 case AARCH64_OPND_SVE_LIMM_MOV
:
3127 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
3128 uint64_t uimm
= opnd
->imm
.value
;
3129 if (!aarch64_logical_immediate_p (uimm
, esize
, NULL
))
3131 set_other_error (mismatch_detail
, idx
,
3132 _("immediate out of range"));
3135 if (!aarch64_sve_dupm_mov_immediate_p (uimm
, esize
))
3137 set_other_error (mismatch_detail
, idx
,
3138 _("invalid replicated MOV immediate"));
3144 case AARCH64_OPND_SVE_PATTERN_SCALED
:
3145 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL
);
3146 if (!value_in_range_p (opnd
->shifter
.amount
, 1, 16))
3148 set_multiplier_out_of_range_error (mismatch_detail
, idx
, 1, 16);
3153 case AARCH64_OPND_SVE_SHLIMM_PRED
:
3154 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
3155 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22
:
3156 size
= aarch64_get_qualifier_esize (opnds
[idx
- 1].qualifier
);
3157 if (!value_in_range_p (opnd
->imm
.value
, 0, 8 * size
- 1))
3159 set_imm_out_of_range_error (mismatch_detail
, idx
,
3165 case AARCH64_OPND_SME_SHRIMM4
:
3166 size
= 1 << get_operand_fields_width (get_operand_from_code (type
));
3167 if (!value_in_range_p (opnd
->imm
.value
, 1, size
))
3169 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, size
);
3174 case AARCH64_OPND_SME_SHRIMM5
:
3175 case AARCH64_OPND_SVE_SHRIMM_PRED
:
3176 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
3177 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22
:
3178 num
= (type
== AARCH64_OPND_SVE_SHRIMM_UNPRED_22
) ? 2 : 1;
3179 size
= aarch64_get_qualifier_esize (opnds
[idx
- num
].qualifier
);
3180 if (!value_in_range_p (opnd
->imm
.value
, 1, 8 * size
))
3182 set_imm_out_of_range_error (mismatch_detail
, idx
, 1, 8*size
);
3187 case AARCH64_OPND_SME_ZT0_INDEX
:
3188 if (!value_in_range_p (opnd
->imm
.value
, 0, 56))
3190 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, 56);
3193 if (opnd
->imm
.value
% 8 != 0)
3195 set_other_error (mismatch_detail
, idx
,
3196 _("byte index must be a multiple of 8"));
3201 case AARCH64_OPND_SME_ZT0_INDEX2_12
:
3202 if (!value_in_range_p (opnd
->imm
.value
, 0, 3))
3204 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, 3);
3214 case AARCH64_OPND_CLASS_SYSTEM
:
3217 case AARCH64_OPND_PSTATEFIELD
:
3218 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
3219 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
3221 assert (aarch64_pstatefields
[i
].name
);
3222 assert (idx
== 0 && opnds
[1].type
== AARCH64_OPND_UIMM4
);
3223 max_value
= F_GET_REG_MAX_VALUE (aarch64_pstatefields
[i
].flags
);
3224 if (opnds
[1].imm
.value
< 0 || opnds
[1].imm
.value
> max_value
)
3226 set_imm_out_of_range_error (mismatch_detail
, 1, 0, max_value
);
3230 case AARCH64_OPND_PRFOP
:
3231 if (opcode
->iclass
== ldst_regoff
&& opnd
->prfop
->value
>= 24)
3233 set_other_error (mismatch_detail
, idx
,
3234 _("the register-index form of PRFM does"
3235 " not accept opcodes in the range 24-31"));
3244 case AARCH64_OPND_CLASS_SIMD_ELEMENT
:
3245 /* Get the upper bound for the element index. */
3246 if (opcode
->op
== OP_FCMLA_ELEM
)
3247 /* FCMLA index range depends on the vector size of other operands
3248 and is halfed because complex numbers take two elements. */
3249 num
= aarch64_get_qualifier_nelem (opnds
[0].qualifier
)
3250 * aarch64_get_qualifier_esize (opnds
[0].qualifier
) / 2;
3251 else if (opcode
->iclass
== lut
)
3253 size
= get_operand_fields_width (get_operand_from_code (type
)) - 5;
3254 if (!check_reglane (opnd
, mismatch_detail
, idx
, "v", 0, 31,
3255 0, (1 << size
) - 1))
3261 num
= num
/ aarch64_get_qualifier_esize (qualifier
) - 1;
3262 assert (aarch64_get_qualifier_nelem (qualifier
) == 1);
3264 /* Index out-of-range. */
3265 if (!value_in_range_p (opnd
->reglane
.index
, 0, num
))
3267 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
3270 /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
3271 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
3272 number is encoded in "size:M:Rm":
3278 if (type
== AARCH64_OPND_Em16
3279 && (qualifier
== AARCH64_OPND_QLF_S_H
3280 || qualifier
== AARCH64_OPND_QLF_S_2B
)
3281 && !value_in_range_p (opnd
->reglane
.regno
, 0, 15))
3283 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 15);
3286 if (type
== AARCH64_OPND_Em8
3287 && !value_in_range_p (opnd
->reglane
.regno
, 0, 7))
3289 set_regno_out_of_range_error (mismatch_detail
, idx
, 0, 7);
3294 case AARCH64_OPND_CLASS_MODIFIED_REG
:
3295 assert (idx
== 1 || idx
== 2);
3298 case AARCH64_OPND_Rm_EXT
:
3299 if (!aarch64_extend_operator_p (opnd
->shifter
.kind
)
3300 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
3302 set_other_error (mismatch_detail
, idx
,
3303 _("extend operator expected"));
3306 /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
3307 (i.e. SP), in which case it defaults to LSL. The LSL alias is
3308 only valid when "Rd" or "Rn" is '11111', and is preferred in that
3310 if (!aarch64_stack_pointer_p (opnds
+ 0)
3311 && (idx
!= 2 || !aarch64_stack_pointer_p (opnds
+ 1)))
3313 if (!opnd
->shifter
.operator_present
)
3315 set_other_error (mismatch_detail
, idx
,
3316 _("missing extend operator"));
3319 else if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3321 set_other_error (mismatch_detail
, idx
,
3322 _("'LSL' operator not allowed"));
3326 assert (opnd
->shifter
.operator_present
/* Default to LSL. */
3327 || opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
3328 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 4))
3330 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 4);
3333 /* In the 64-bit form, the final register operand is written as Wm
3334 for all but the (possibly omitted) UXTX/LSL and SXTX
3336 N.B. GAS allows X register to be used with any operator as a
3337 programming convenience. */
3338 if (qualifier
== AARCH64_OPND_QLF_X
3339 && opnd
->shifter
.kind
!= AARCH64_MOD_LSL
3340 && opnd
->shifter
.kind
!= AARCH64_MOD_UXTX
3341 && opnd
->shifter
.kind
!= AARCH64_MOD_SXTX
)
3343 set_other_error (mismatch_detail
, idx
, _("W register expected"));
3348 case AARCH64_OPND_Rm_SFT
:
3349 /* ROR is not available to the shifted register operand in
3350 arithmetic instructions. */
3351 if (!aarch64_shift_operator_p (opnd
->shifter
.kind
))
3353 set_other_error (mismatch_detail
, idx
,
3354 _("shift operator expected"));
3357 if (opnd
->shifter
.kind
== AARCH64_MOD_ROR
3358 && opcode
->iclass
!= log_shift
)
3360 set_other_error (mismatch_detail
, idx
,
3361 _("'ROR' operator not allowed"));
3364 num
= qualifier
== AARCH64_OPND_QLF_W
? 31 : 63;
3365 if (!value_in_range_p (opnd
->shifter
.amount
, 0, num
))
3367 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, num
);
3372 case AARCH64_OPND_Rm_LSL
:
3373 /* We expect here that opnd->shifter.kind != AARCH64_MOD_LSL
3374 because the parser already restricts the type of shift to LSL only,
3375 so another check of shift kind would be redundant. */
3376 if (!value_in_range_p (opnd
->shifter
.amount
, 0, 7))
3378 set_sft_amount_out_of_range_error (mismatch_detail
, idx
, 0, 7);
3395 /* Main entrypoint for the operand constraint checking.
3397 Return 1 if operands of *INST meet the constraint applied by the operand
3398 codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
3399 not NULL, return the detail of the error in *MISMATCH_DETAIL. N.B. when
3400 adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
3401 with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
3402 error kind when it is notified that an instruction does not pass the check).
3404 Un-determined operand qualifiers may get established during the process. */
3407 aarch64_match_operands_constraint (aarch64_inst
*inst
,
3408 aarch64_operand_error
*mismatch_detail
)
3412 DEBUG_TRACE ("enter");
3414 i
= inst
->opcode
->tied_operand
;
3418 /* Check for tied_operands with specific opcode iclass. */
3419 switch (inst
->opcode
->iclass
)
3421 /* For SME LDR and STR instructions #imm must have the same numerical
3422 value for both operands.
3426 assert (inst
->operands
[0].type
== AARCH64_OPND_SME_ZA_array_off4
);
3427 assert (inst
->operands
[1].type
== AARCH64_OPND_SME_ADDR_RI_U4xVL
);
3428 if (inst
->operands
[0].indexed_za
.index
.imm
3429 != inst
->operands
[1].addr
.offset
.imm
)
3431 if (mismatch_detail
)
3433 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_IMMS
;
3434 mismatch_detail
->index
= i
;
3442 /* Check for cases where a source register needs to be the
3443 same as the destination register. Do this before
3444 matching qualifiers since if an instruction has both
3445 invalid tying and invalid qualifiers, the error about
3446 qualifiers would suggest several alternative instructions
3447 that also have invalid tying. */
3448 enum aarch64_operand_class op_class
3449 = aarch64_get_operand_class (inst
->operands
[0].type
);
3450 assert (aarch64_get_operand_class (inst
->operands
[i
].type
)
3452 if (op_class
== AARCH64_OPND_CLASS_SVE_REGLIST
3453 ? ((inst
->operands
[0].reglist
.first_regno
3454 != inst
->operands
[i
].reglist
.first_regno
)
3455 || (inst
->operands
[0].reglist
.num_regs
3456 != inst
->operands
[i
].reglist
.num_regs
)
3457 || (inst
->operands
[0].reglist
.stride
3458 != inst
->operands
[i
].reglist
.stride
))
3459 : (inst
->operands
[0].reg
.regno
3460 != inst
->operands
[i
].reg
.regno
))
3462 if (mismatch_detail
)
3464 mismatch_detail
->kind
= AARCH64_OPDE_UNTIED_OPERAND
;
3465 mismatch_detail
->index
= i
;
3466 mismatch_detail
->error
= NULL
;
3475 /* Match operands' qualifier.
3476 *INST has already had qualifier establish for some, if not all, of
3477 its operands; we need to find out whether these established
3478 qualifiers match one of the qualifier sequence in
3479 INST->OPCODE->QUALIFIERS_LIST. If yes, we will assign each operand
3480 with the corresponding qualifier in such a sequence.
3481 Only basic operand constraint checking is done here; the more thorough
3482 constraint checking will carried out by operand_general_constraint_met_p,
3483 which has be to called after this in order to get all of the operands'
3484 qualifiers established. */
3486 if (match_operands_qualifier (inst
, true /* update_p */,
3487 &invalid_count
) == 0)
3489 DEBUG_TRACE ("FAIL on operand qualifier matching");
3490 if (mismatch_detail
)
3492 /* Return an error type to indicate that it is the qualifier
3493 matching failure; we don't care about which operand as there
3494 are enough information in the opcode table to reproduce it. */
3495 mismatch_detail
->kind
= AARCH64_OPDE_INVALID_VARIANT
;
3496 mismatch_detail
->index
= -1;
3497 mismatch_detail
->error
= NULL
;
3498 mismatch_detail
->data
[0].i
= invalid_count
;
3503 /* Match operands' constraint. */
3504 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
3506 enum aarch64_opnd type
= inst
->opcode
->operands
[i
];
3507 if (type
== AARCH64_OPND_NIL
)
3509 if (inst
->operands
[i
].skip
)
3511 DEBUG_TRACE ("skip the incomplete operand %d", i
);
3514 if (operand_general_constraint_met_p (inst
->operands
, i
, type
,
3515 inst
->opcode
, mismatch_detail
) == 0)
3517 DEBUG_TRACE ("FAIL on operand %d", i
);
3522 DEBUG_TRACE ("PASS");
3527 /* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
3528 Also updates the TYPE of each INST->OPERANDS with the corresponding
3529 value of OPCODE->OPERANDS.
3531 Note that some operand qualifiers may need to be manually cleared by
3532 the caller before it further calls the aarch64_opcode_encode; by
3533 doing this, it helps the qualifier matching facilities work
3536 const aarch64_opcode
*
3537 aarch64_replace_opcode (aarch64_inst
*inst
, const aarch64_opcode
*opcode
)
3540 const aarch64_opcode
*old
= inst
->opcode
;
3542 inst
->opcode
= opcode
;
3544 /* Update the operand types. */
3545 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
3547 inst
->operands
[i
].type
= opcode
->operands
[i
];
3548 if (opcode
->operands
[i
] == AARCH64_OPND_NIL
)
3552 DEBUG_TRACE ("replace %s with %s", old
->name
, opcode
->name
);
3558 aarch64_operand_index (const enum aarch64_opnd
*operands
, enum aarch64_opnd operand
)
3561 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
3562 if (operands
[i
] == operand
)
3564 else if (operands
[i
] == AARCH64_OPND_NIL
)
/* Expand to the names R0...R30, with FOR31 supplying the name used for
   register number 31 (which is either the stack pointer or the zero
   register depending on context).  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),  \
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* Integer register names, indexed as follows:
   [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
#undef R32
#undef R64
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
#undef ZS
#undef ZD
};
3600 /* Return the integer register name.
3601 if SP_REG_P is not 0, R31 is an SP reg, other R31 is the zero reg. */
3603 static inline const char *
3604 get_int_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
, int sp_reg_p
)
3606 const int has_zr
= sp_reg_p
? 0 : 1;
3607 const int is_64
= aarch64_get_qualifier_esize (qualifier
) == 4 ? 0 : 1;
3608 return int_reg
[has_zr
][is_64
][regno
];
3611 /* Like get_int_reg_name, but IS_64 is always 1. */
3613 static inline const char *
3614 get_64bit_int_reg_name (int regno
, int sp_reg_p
)
3616 const int has_zr
= sp_reg_p
? 0 : 1;
3617 return int_reg
[has_zr
][1][regno
];
3620 /* Get the name of the integer offset register in OPND, using the shift type
3621 to decide whether it's a word or doubleword. */
3623 static inline const char *
3624 get_offset_int_reg_name (const aarch64_opnd_info
*opnd
)
3626 switch (opnd
->shifter
.kind
)
3628 case AARCH64_MOD_UXTW
:
3629 case AARCH64_MOD_SXTW
:
3630 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_W
, 0);
3632 case AARCH64_MOD_LSL
:
3633 case AARCH64_MOD_SXTX
:
3634 return get_int_reg_name (opnd
->addr
.offset
.regno
, AARCH64_OPND_QLF_X
, 0);
3641 /* Get the name of the SVE vector offset register in OPND, using the operand
3642 qualifier to decide whether the suffix should be .S or .D. */
3644 static inline const char *
3645 get_addr_sve_reg_name (int regno
, aarch64_opnd_qualifier_t qualifier
)
3647 assert (qualifier
== AARCH64_OPND_QLF_S_S
3648 || qualifier
== AARCH64_OPND_QLF_S_D
);
3649 return sve_reg
[qualifier
== AARCH64_OPND_QLF_S_D
][regno
];
3652 /* Types for expanding an encoded 8-bit value to a floating-point value. */
/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm = 0;
  uint32_t sign, frac7, top_frac, repl4;

  sign = (imm8 >> 7) & 0x01;		/* imm8<7>   */
  frac7 = imm8 & 0x7f;			/* imm8<6:0> */
  top_frac = frac7 >> 6;		/* imm8<6>   */
  repl4 = top_frac * 0xf;		/* Replicate(imm8<6>,4) */

  if (size == 8)
    {
      /* Build the top 32 bits of the IEEE double, then shift into place.  */
      imm = (sign << (63 - 32))			/* imm8<7> */
	| ((top_frac ^ 1) << (62 - 32))		/* NOT(imm8<6>) */
	| (repl4 << (58 - 32)) | (top_frac << (57 - 32))
	| (top_frac << (56 - 32))
	| (top_frac << (55 - 32))		/* Replicate(imm8<6>,7) */
	| (frac7 << (48 - 32));			/* imm8<6>:imm8<5:0> */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      /* Half-precision values are expanded to single precision too.  */
      imm = (sign << 31)			/* imm8<7> */
	| ((top_frac ^ 1) << 30)		/* NOT(imm8<6>) */
	| (repl4 << 26)				/* Replicate(imm8<6>,4) */
	| (frac7 << 19);			/* imm8<6>:imm8<5:0> */
    }
  else
    {
      /* An unsupported size.  */
      assert (0);
    }

  return imm;
}
3716 /* Return a string based on FMT with the register style applied. */
3719 style_reg (struct aarch64_styler
*styler
, const char *fmt
, ...)
3725 txt
= styler
->apply_style (styler
, dis_style_register
, fmt
, ap
);
3731 /* Return a string based on FMT with the immediate style applied. */
3734 style_imm (struct aarch64_styler
*styler
, const char *fmt
, ...)
3740 txt
= styler
->apply_style (styler
, dis_style_immediate
, fmt
, ap
);
3746 /* Return a string based on FMT with the sub-mnemonic style applied. */
3749 style_sub_mnem (struct aarch64_styler
*styler
, const char *fmt
, ...)
3755 txt
= styler
->apply_style (styler
, dis_style_sub_mnemonic
, fmt
, ap
);
3761 /* Return a string based on FMT with the address style applied. */
3764 style_addr (struct aarch64_styler
*styler
, const char *fmt
, ...)
3770 txt
= styler
->apply_style (styler
, dis_style_address
, fmt
, ap
);
3776 /* Produce the string representation of the register list operand *OPND
3777 in the buffer pointed by BUF of size SIZE. PREFIX is the part of
3778 the register name that comes before the register number, such as "v". */
3780 print_register_list (char *buf
, size_t size
, const aarch64_opnd_info
*opnd
,
3781 const char *prefix
, struct aarch64_styler
*styler
)
3783 const int mask
= (prefix
[0] == 'p' ? 15 : 31);
3784 const int num_regs
= opnd
->reglist
.num_regs
;
3785 const int stride
= opnd
->reglist
.stride
;
3786 const int first_reg
= opnd
->reglist
.first_regno
;
3787 const int last_reg
= (first_reg
+ (num_regs
- 1) * stride
) & mask
;
3788 const char *qlf_name
= aarch64_get_qualifier_name (opnd
->qualifier
);
3789 char tb
[16]; /* Temporary buffer. */
3791 assert (opnd
->type
!= AARCH64_OPND_LEt
|| opnd
->reglist
.has_index
);
3792 assert (num_regs
>= 1 && num_regs
<= 4);
3794 /* Prepare the index if any. */
3795 if (opnd
->reglist
.has_index
)
3796 /* PR 21096: The %100 is to silence a warning about possible truncation. */
3797 snprintf (tb
, sizeof (tb
), "[%s]",
3798 style_imm (styler
, "%" PRIi64
, (opnd
->reglist
.index
% 100)));
3802 /* The hyphenated form is preferred for disassembly if there is
3803 more than one register in the list, and the register numbers
3804 are monotonically increasing in increments of one. */
3805 if (stride
== 1 && num_regs
> 1)
3806 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3807 snprintf (buf
, size
, "{%s-%s}%s",
3808 style_reg (styler
, "%s%d", prefix
, first_reg
),
3809 style_reg (styler
, "%s%d", prefix
, last_reg
), tb
);
3811 snprintf (buf
, size
, "{%s-%s}%s",
3812 style_reg (styler
, "%s%d.%s", prefix
, first_reg
, qlf_name
),
3813 style_reg (styler
, "%s%d.%s", prefix
, last_reg
, qlf_name
), tb
);
3816 const int reg0
= first_reg
;
3817 const int reg1
= (first_reg
+ stride
) & mask
;
3818 const int reg2
= (first_reg
+ stride
* 2) & mask
;
3819 const int reg3
= (first_reg
+ stride
* 3) & mask
;
3824 snprintf (buf
, size
, "{%s}%s",
3825 style_reg (styler
, "%s%d.%s", prefix
, reg0
, qlf_name
),
3829 snprintf (buf
, size
, "{%s, %s}%s",
3830 style_reg (styler
, "%s%d.%s", prefix
, reg0
, qlf_name
),
3831 style_reg (styler
, "%s%d.%s", prefix
, reg1
, qlf_name
),
3835 snprintf (buf
, size
, "{%s, %s, %s}%s",
3836 style_reg (styler
, "%s%d.%s", prefix
, reg0
, qlf_name
),
3837 style_reg (styler
, "%s%d.%s", prefix
, reg1
, qlf_name
),
3838 style_reg (styler
, "%s%d.%s", prefix
, reg2
, qlf_name
),
3842 snprintf (buf
, size
, "{%s, %s, %s, %s}%s",
3843 style_reg (styler
, "%s%d.%s", prefix
, reg0
, qlf_name
),
3844 style_reg (styler
, "%s%d.%s", prefix
, reg1
, qlf_name
),
3845 style_reg (styler
, "%s%d.%s", prefix
, reg2
, qlf_name
),
3846 style_reg (styler
, "%s%d.%s", prefix
, reg3
, qlf_name
),
3853 /* Print the register+immediate address in OPND to BUF, which has SIZE
3854 characters. BASE is the name of the base register. */
3857 print_immediate_offset_address (char *buf
, size_t size
,
3858 const aarch64_opnd_info
*opnd
,
3860 struct aarch64_styler
*styler
)
3862 if (opnd
->addr
.writeback
)
3864 if (opnd
->addr
.preind
)
3866 if (opnd
->type
== AARCH64_OPND_ADDR_SIMM10
&& !opnd
->addr
.offset
.imm
)
3867 snprintf (buf
, size
, "[%s]!", style_reg (styler
, base
));
3869 snprintf (buf
, size
, "[%s, %s]!",
3870 style_reg (styler
, base
),
3871 style_imm (styler
, "#%d", opnd
->addr
.offset
.imm
));
3874 snprintf (buf
, size
, "[%s], %s",
3875 style_reg (styler
, base
),
3876 style_imm (styler
, "#%d", opnd
->addr
.offset
.imm
));
3880 if (opnd
->shifter
.operator_present
)
3882 assert (opnd
->shifter
.kind
== AARCH64_MOD_MUL_VL
);
3883 snprintf (buf
, size
, "[%s, %s, %s]",
3884 style_reg (styler
, base
),
3885 style_imm (styler
, "#%d", opnd
->addr
.offset
.imm
),
3886 style_sub_mnem (styler
, "mul vl"));
3888 else if (opnd
->addr
.offset
.imm
)
3889 snprintf (buf
, size
, "[%s, %s]",
3890 style_reg (styler
, base
),
3891 style_imm (styler
, "#%d", opnd
->addr
.offset
.imm
));
3893 snprintf (buf
, size
, "[%s]", style_reg (styler
, base
));
3897 /* Produce the string representation of the register offset address operand
3898 *OPND in the buffer pointed by BUF of size SIZE. BASE and OFFSET are
3899 the names of the base and offset registers. */
3901 print_register_offset_address (char *buf
, size_t size
,
3902 const aarch64_opnd_info
*opnd
,
3903 const char *base
, const char *offset
,
3904 struct aarch64_styler
*styler
)
3906 char tb
[32]; /* Temporary buffer. */
3907 bool print_extend_p
= true;
3908 bool print_amount_p
= true;
3909 const char *shift_name
= aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
3911 /* This is the case where offset is the optional argument and the optional
3912 argument is ignored in the disassembly. */
3913 if (opnd
->type
== AARCH64_OPND_SVE_ADDR_ZX
&& offset
!= NULL
3914 && strcmp (offset
,"xzr") == 0)
3916 /* Example: [<Zn>.S{, <Xm>}].
3917 When the assembly is [Z0.S, XZR] or [Z0.S], Xm is XZR in both the cases
3918 and the preferred disassembly is [Z0.S], ignoring the optional Xm. */
3919 snprintf (buf
, size
, "[%s]", style_reg (styler
, base
));
3923 if (!opnd
->shifter
.amount
&& (opnd
->qualifier
!= AARCH64_OPND_QLF_S_B
3924 || !opnd
->shifter
.amount_present
))
3926 /* Not print the shift/extend amount when the amount is zero and
3927 when it is not the special case of 8-bit load/store
3929 print_amount_p
= false;
3930 /* Likewise, no need to print the shift operator LSL in such a
3932 if (opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3933 print_extend_p
= false;
3936 /* Prepare for the extend/shift. */
3940 snprintf (tb
, sizeof (tb
), ", %s %s",
3941 style_sub_mnem (styler
, shift_name
),
3942 style_imm (styler
, "#%" PRIi64
,
3943 /* PR 21096: The %100 is to silence a warning about possible
3945 (opnd
->shifter
.amount
% 100)));
3947 snprintf (tb
, sizeof (tb
), ", %s",
3948 style_sub_mnem (styler
, shift_name
));
3953 snprintf (buf
, size
, "[%s, %s%s]", style_reg (styler
, base
),
3954 style_reg (styler
, offset
), tb
);
/* Print ZA tiles from imm8 in ZERO instruction.

   The preferred disassembly of this instruction uses the shortest list of
   tile names that represent the encoded immediate mask.

   * An all-ones immediate is disassembled as {ZA}.
   * An all-zeros immediate is disassembled as an empty list { }.  */

static void
print_sme_za_list (char *buf, size_t size, int mask,
		   struct aarch64_styler *styler)
{
  /* Tile names, ordered so that names covering more of the mask come
     first; the final " " entry pairs with an all-zeros mask value.  */
  const char* zan[] = { "za", "za0.h", "za1.h", "za0.s",
			"za1.s", "za2.s", "za3.s", "za0.d",
			"za1.d", "za2.d", "za3.d", "za4.d",
			"za5.d", "za6.d", "za7.d", " " };
  const int zan_v[] = { 0xff, 0x55, 0xaa, 0x11,
			0x22, 0x44, 0x88, 0x01,
			0x02, 0x04, 0x08, 0x10,
			0x20, 0x40, 0x80, 0x00 };
  const int ZAN_SIZE = sizeof(zan) / sizeof(zan[0]);
  int i;
  int len = snprintf (buf, size, "{");

  for (i = 0; i < ZAN_SIZE; i++)
    {
      /* Emit a tile name whenever its bits are fully covered by the
	 remaining mask, then clear those bits.  */
      if ((mask & zan_v[i]) == zan_v[i])
	{
	  if (len > 1)
	    len += snprintf (buf + len, size - len, ", ");
	  len += snprintf (buf + len, size - len, "%s",
			   style_reg (styler, zan[i]));
	  mask &= ~zan_v[i];
	  if (mask == 0)
	    break;
	}
    }

  snprintf (buf + len, size - len, "}");
}
3999 /* Generate the string representation of the operand OPNDS[IDX] for OPCODE
4000 in *BUF. The caller should pass in the maximum size of *BUF in SIZE.
4001 PC, PCREL_P and ADDRESS are used to pass in and return information about
4002 the PC-relative address calculation, where the PC value is passed in
4003 PC. If the operand is pc-relative related, *PCREL_P (if PCREL_P non-NULL)
4004 will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
4005 calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
4007 The function serves both the disassembler and the assembler diagnostics
4008 issuer, which is the reason why it lives in this file. */
4011 aarch64_print_operand (char *buf
, size_t size
, bfd_vma pc
,
4012 const aarch64_opcode
*opcode
,
4013 const aarch64_opnd_info
*opnds
, int idx
, int *pcrel_p
,
4014 bfd_vma
*address
, char** notes
,
4015 char *comment
, size_t comment_size
,
4016 aarch64_feature_set features
,
4017 struct aarch64_styler
*styler
)
4019 unsigned int i
, num_conds
;
4020 const char *name
= NULL
;
4021 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
4022 enum aarch64_modifier_kind kind
;
4023 uint64_t addr
, enum_value
;
4025 if (comment
!= NULL
)
4027 assert (comment_size
> 0);
4031 assert (comment_size
== 0);
4039 case AARCH64_OPND_Rd
:
4040 case AARCH64_OPND_Rn
:
4041 case AARCH64_OPND_Rm
:
4042 case AARCH64_OPND_Rt
:
4043 case AARCH64_OPND_Rt2
:
4044 case AARCH64_OPND_Rs
:
4045 case AARCH64_OPND_Ra
:
4046 case AARCH64_OPND_Rt_IN_SYS_ALIASES
:
4047 case AARCH64_OPND_Rt_LS64
:
4048 case AARCH64_OPND_Rt_SYS
:
4049 case AARCH64_OPND_PAIRREG
:
4050 case AARCH64_OPND_PAIRREG_OR_XZR
:
4051 case AARCH64_OPND_SVE_Rm
:
4052 case AARCH64_OPND_LSE128_Rt
:
4053 case AARCH64_OPND_LSE128_Rt2
:
4054 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
4055 the <ic_op>, therefore we use opnd->present to override the
4056 generic optional-ness information. */
4057 if (opnd
->type
== AARCH64_OPND_Rt_SYS
)
4062 else if ((opnd
->type
== AARCH64_OPND_Rt_IN_SYS_ALIASES
)
4064 != get_optional_operand_default_value (opcode
)))
4066 /* Avoid printing an invalid additional value for Rt in SYS aliases such as
4067 BRB, provide a helpful comment instead */
4068 snprintf (comment
, comment_size
, "unpredictable encoding (Rt!=31): #%u", opnd
->reg
.regno
);
4071 /* Omit the operand, e.g. RET. */
4072 else if (optional_operand_p (opcode
, idx
)
4074 == get_optional_operand_default_value (opcode
)))
4076 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
4077 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
4078 snprintf (buf
, size
, "%s",
4079 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
,
4080 opnd
->qualifier
, 0)));
4083 case AARCH64_OPND_Rd_SP
:
4084 case AARCH64_OPND_Rn_SP
:
4085 case AARCH64_OPND_Rt_SP
:
4086 case AARCH64_OPND_SVE_Rn_SP
:
4087 case AARCH64_OPND_Rm_SP
:
4088 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
4089 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
4090 || opnd
->qualifier
== AARCH64_OPND_QLF_X
4091 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
4092 snprintf (buf
, size
, "%s",
4093 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
,
4094 opnd
->qualifier
, 1)));
4097 case AARCH64_OPND_Rm_EXT
:
4098 kind
= opnd
->shifter
.kind
;
4099 assert (idx
== 1 || idx
== 2);
4100 if ((aarch64_stack_pointer_p (opnds
)
4101 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
4102 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
4103 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
4104 && kind
== AARCH64_MOD_UXTW
)
4105 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
4106 && kind
== AARCH64_MOD_UXTX
)))
4108 /* 'LSL' is the preferred form in this case. */
4109 kind
= AARCH64_MOD_LSL
;
4110 if (opnd
->shifter
.amount
== 0)
4112 /* Shifter omitted. */
4113 snprintf (buf
, size
, "%s",
4115 get_int_reg_name (opnd
->reg
.regno
,
4116 opnd
->qualifier
, 0)));
4120 if (opnd
->shifter
.amount
)
4121 snprintf (buf
, size
, "%s, %s %s",
4122 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0)),
4123 style_sub_mnem (styler
, aarch64_operand_modifiers
[kind
].name
),
4124 style_imm (styler
, "#%" PRIi64
, opnd
->shifter
.amount
));
4126 snprintf (buf
, size
, "%s, %s",
4127 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0)),
4128 style_sub_mnem (styler
, aarch64_operand_modifiers
[kind
].name
));
4131 case AARCH64_OPND_Rm_SFT
:
4132 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
4133 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
4134 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
4135 snprintf (buf
, size
, "%s",
4136 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
,
4137 opnd
->qualifier
, 0)));
4139 snprintf (buf
, size
, "%s, %s %s",
4140 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0)),
4141 style_sub_mnem (styler
, aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
),
4142 style_imm (styler
, "#%" PRIi64
, opnd
->shifter
.amount
));
4145 case AARCH64_OPND_Rm_LSL
:
4146 assert (opnd
->qualifier
== AARCH64_OPND_QLF_X
);
4147 assert (opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
4148 if (opnd
->shifter
.amount
== 0)
4149 snprintf (buf
, size
, "%s",
4150 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
,
4151 opnd
->qualifier
, 0)));
4153 snprintf (buf
, size
, "%s, %s %s",
4154 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0)),
4155 style_sub_mnem (styler
, aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
),
4156 style_imm (styler
, "#%" PRIi64
, opnd
->shifter
.amount
));
4159 case AARCH64_OPND_Fd
:
4160 case AARCH64_OPND_Fn
:
4161 case AARCH64_OPND_Fm
:
4162 case AARCH64_OPND_Fa
:
4163 case AARCH64_OPND_Ft
:
4164 case AARCH64_OPND_Ft2
:
4165 case AARCH64_OPND_Sd
:
4166 case AARCH64_OPND_Sn
:
4167 case AARCH64_OPND_Sm
:
4168 case AARCH64_OPND_SVE_VZn
:
4169 case AARCH64_OPND_SVE_Vd
:
4170 case AARCH64_OPND_SVE_Vm
:
4171 case AARCH64_OPND_SVE_Vn
:
4172 snprintf (buf
, size
, "%s",
4173 style_reg (styler
, "%s%d",
4174 aarch64_get_qualifier_name (opnd
->qualifier
),
4178 case AARCH64_OPND_Va
:
4179 case AARCH64_OPND_Vd
:
4180 case AARCH64_OPND_Vn
:
4181 case AARCH64_OPND_Vm
:
4182 snprintf (buf
, size
, "%s",
4183 style_reg (styler
, "v%d.%s", opnd
->reg
.regno
,
4184 aarch64_get_qualifier_name (opnd
->qualifier
)));
4187 case AARCH64_OPND_Ed
:
4188 case AARCH64_OPND_En
:
4189 case AARCH64_OPND_Em
:
4190 case AARCH64_OPND_Em16
:
4191 case AARCH64_OPND_Em8
:
4192 case AARCH64_OPND_SM3_IMM2
:
4193 snprintf (buf
, size
, "%s[%s]",
4194 style_reg (styler
, "v%d.%s", opnd
->reglane
.regno
,
4195 aarch64_get_qualifier_name (opnd
->qualifier
)),
4196 style_imm (styler
, "%" PRIi64
, opnd
->reglane
.index
));
4199 case AARCH64_OPND_Em_INDEX1_14
:
4200 case AARCH64_OPND_Em_INDEX2_13
:
4201 case AARCH64_OPND_Em_INDEX3_12
:
4202 snprintf (buf
, size
, "%s[%s]",
4203 style_reg (styler
, "v%d", opnd
->reglane
.regno
),
4204 style_imm (styler
, "%" PRIi64
, opnd
->reglane
.index
));
4207 case AARCH64_OPND_VdD1
:
4208 case AARCH64_OPND_VnD1
:
4209 snprintf (buf
, size
, "%s[%s]",
4210 style_reg (styler
, "v%d.d", opnd
->reg
.regno
),
4211 style_imm (styler
, "1"));
4214 case AARCH64_OPND_LVn
:
4215 case AARCH64_OPND_LVn_LUT
:
4216 case AARCH64_OPND_LVt
:
4217 case AARCH64_OPND_LVt_AL
:
4218 case AARCH64_OPND_LEt
:
4219 print_register_list (buf
, size
, opnd
, "v", styler
);
4222 case AARCH64_OPND_SVE_Pd
:
4223 case AARCH64_OPND_SVE_Pg3
:
4224 case AARCH64_OPND_SVE_Pg4_5
:
4225 case AARCH64_OPND_SVE_Pg4_10
:
4226 case AARCH64_OPND_SVE_Pg4_16
:
4227 case AARCH64_OPND_SVE_Pm
:
4228 case AARCH64_OPND_SVE_Pn
:
4229 case AARCH64_OPND_SVE_Pt
:
4230 case AARCH64_OPND_SME_Pm
:
4231 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
4232 snprintf (buf
, size
, "%s",
4233 style_reg (styler
, "p%d", opnd
->reg
.regno
));
4234 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
4235 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
4236 snprintf (buf
, size
, "%s",
4237 style_reg (styler
, "p%d/%s", opnd
->reg
.regno
,
4238 aarch64_get_qualifier_name (opnd
->qualifier
)));
4240 snprintf (buf
, size
, "%s",
4241 style_reg (styler
, "p%d.%s", opnd
->reg
.regno
,
4242 aarch64_get_qualifier_name (opnd
->qualifier
)));
4245 case AARCH64_OPND_SVE_PNd
:
4246 case AARCH64_OPND_SVE_PNg4_10
:
4247 case AARCH64_OPND_SVE_PNn
:
4248 case AARCH64_OPND_SVE_PNt
:
4249 case AARCH64_OPND_SME_PNd3
:
4250 case AARCH64_OPND_SME_PNg3
:
4251 case AARCH64_OPND_SME_PNn
:
4252 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
4253 snprintf (buf
, size
, "%s",
4254 style_reg (styler
, "pn%d", opnd
->reg
.regno
));
4255 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
4256 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
4257 snprintf (buf
, size
, "%s",
4258 style_reg (styler
, "pn%d/%s", opnd
->reg
.regno
,
4259 aarch64_get_qualifier_name (opnd
->qualifier
)));
4261 snprintf (buf
, size
, "%s",
4262 style_reg (styler
, "pn%d.%s", opnd
->reg
.regno
,
4263 aarch64_get_qualifier_name (opnd
->qualifier
)));
4266 case AARCH64_OPND_SME_Pdx2
:
4267 case AARCH64_OPND_SME_PdxN
:
4268 print_register_list (buf
, size
, opnd
, "p", styler
);
4271 case AARCH64_OPND_SME_PNn3_INDEX1
:
4272 case AARCH64_OPND_SME_PNn3_INDEX2
:
4273 snprintf (buf
, size
, "%s[%s]",
4274 style_reg (styler
, "pn%d", opnd
->reglane
.regno
),
4275 style_imm (styler
, "%" PRIi64
, opnd
->reglane
.index
));
4278 case AARCH64_OPND_SVE_Za_5
:
4279 case AARCH64_OPND_SVE_Za_16
:
4280 case AARCH64_OPND_SVE_Zd
:
4281 case AARCH64_OPND_SVE_Zm_5
:
4282 case AARCH64_OPND_SVE_Zm_16
:
4283 case AARCH64_OPND_SVE_Zn
:
4284 case AARCH64_OPND_SVE_Zt
:
4285 case AARCH64_OPND_SME_Zm
:
4286 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
4287 snprintf (buf
, size
, "%s", style_reg (styler
, "z%d", opnd
->reg
.regno
));
4289 snprintf (buf
, size
, "%s",
4290 style_reg (styler
, "z%d.%s", opnd
->reg
.regno
,
4291 aarch64_get_qualifier_name (opnd
->qualifier
)));
4294 case AARCH64_OPND_SVE_ZnxN
:
4295 case AARCH64_OPND_SVE_ZtxN
:
4296 case AARCH64_OPND_SME_Zdnx2
:
4297 case AARCH64_OPND_SME_Zdnx4
:
4298 case AARCH64_OPND_SME_Zdnx4_STRIDED
:
4299 case AARCH64_OPND_SME_Zmx2
:
4300 case AARCH64_OPND_SME_Zmx4
:
4301 case AARCH64_OPND_SME_Znx2
:
4302 case AARCH64_OPND_SME_Znx2_BIT_INDEX
:
4303 case AARCH64_OPND_SME_Znx4
:
4304 case AARCH64_OPND_SME_Ztx2_STRIDED
:
4305 case AARCH64_OPND_SME_Ztx4_STRIDED
:
4306 print_register_list (buf
, size
, opnd
, "z", styler
);
4309 case AARCH64_OPND_SVE_Zm1_23_INDEX
:
4310 case AARCH64_OPND_SVE_Zm2_22_INDEX
:
4311 case AARCH64_OPND_SVE_Zm3_INDEX
:
4312 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
4313 case AARCH64_OPND_SVE_Zm3_19_INDEX
:
4314 case AARCH64_OPND_SVE_Zm3_12_INDEX
:
4315 case AARCH64_OPND_SVE_Zm3_11_INDEX
:
4316 case AARCH64_OPND_SVE_Zm3_10_INDEX
:
4317 case AARCH64_OPND_SVE_Zm4_11_INDEX
:
4318 case AARCH64_OPND_SVE_Zm4_INDEX
:
4319 case AARCH64_OPND_SVE_Zn_INDEX
:
4320 case AARCH64_OPND_SME_Zm_INDEX1
:
4321 case AARCH64_OPND_SME_Zm_INDEX2
:
4322 case AARCH64_OPND_SME_Zm_INDEX2_3
:
4323 case AARCH64_OPND_SME_Zm_INDEX3_1
:
4324 case AARCH64_OPND_SME_Zm_INDEX3_2
:
4325 case AARCH64_OPND_SME_Zm_INDEX3_3
:
4326 case AARCH64_OPND_SME_Zm_INDEX3_10
:
4327 case AARCH64_OPND_SVE_Zn_5_INDEX
:
4328 case AARCH64_OPND_SME_Zm_INDEX4_1
:
4329 case AARCH64_OPND_SME_Zm_INDEX4_2
:
4330 case AARCH64_OPND_SME_Zm_INDEX4_3
:
4331 case AARCH64_OPND_SME_Zm_INDEX4_10
:
4332 case AARCH64_OPND_SME_Zn_INDEX1_16
:
4333 case AARCH64_OPND_SME_Zn_INDEX2_15
:
4334 case AARCH64_OPND_SME_Zn_INDEX2_16
:
4335 case AARCH64_OPND_SME_Zn_INDEX3_14
:
4336 case AARCH64_OPND_SME_Zn_INDEX3_15
:
4337 case AARCH64_OPND_SME_Zn_INDEX4_14
:
4338 snprintf (buf
, size
, "%s[%s]",
4339 (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
4340 ? style_reg (styler
, "z%d", opnd
->reglane
.regno
)
4341 : style_reg (styler
, "z%d.%s", opnd
->reglane
.regno
,
4342 aarch64_get_qualifier_name (opnd
->qualifier
))),
4343 style_imm (styler
, "%" PRIi64
, opnd
->reglane
.index
));
4346 case AARCH64_OPND_SVE_Zn0_INDEX
:
4347 case AARCH64_OPND_SVE_Zn1_17_INDEX
:
4348 case AARCH64_OPND_SVE_Zn2_18_INDEX
:
4349 case AARCH64_OPND_SVE_Zn3_22_INDEX
:
4350 case AARCH64_OPND_SVE_Zd0_INDEX
:
4351 case AARCH64_OPND_SVE_Zd1_17_INDEX
:
4352 case AARCH64_OPND_SVE_Zd2_18_INDEX
:
4353 case AARCH64_OPND_SVE_Zd3_22_INDEX
:
4354 if (opnd
->reglane
.index
== 0)
4355 snprintf (buf
, size
, "%s", style_reg (styler
, "z%d", opnd
->reg
.regno
));
4357 snprintf (buf
, size
, "%s[%s]",
4358 style_reg (styler
, "z%d", opnd
->reglane
.regno
),
4359 style_imm (styler
, "%" PRIi64
, opnd
->reglane
.index
));
4362 case AARCH64_OPND_SME_ZAda_1b
:
4363 case AARCH64_OPND_SME_ZAda_2b
:
4364 case AARCH64_OPND_SME_ZAda_3b
:
4365 snprintf (buf
, size
, "%s",
4366 style_reg (styler
, "za%d.%s", opnd
->reg
.regno
,
4367 aarch64_get_qualifier_name (opnd
->qualifier
)));
4370 case AARCH64_OPND_SME_ZA_HV_idx_src
:
4371 case AARCH64_OPND_SME_ZA_HV_idx_srcxN
:
4372 case AARCH64_OPND_SME_ZA_HV_idx_dest
:
4373 case AARCH64_OPND_SME_ZA_HV_idx_destxN
:
4374 case AARCH64_OPND_SME_ZA_HV_idx_ldstr
:
4375 snprintf (buf
, size
, "%s%s[%s, %s%s%s%s%s]%s",
4376 opnd
->type
== AARCH64_OPND_SME_ZA_HV_idx_ldstr
? "{" : "",
4377 style_reg (styler
, "za%d%c.%s",
4378 opnd
->indexed_za
.regno
,
4379 opnd
->indexed_za
.v
== 1 ? 'v' : 'h',
4380 aarch64_get_qualifier_name (opnd
->qualifier
)),
4381 style_reg (styler
, "w%d", opnd
->indexed_za
.index
.regno
),
4382 style_imm (styler
, "%" PRIi64
, opnd
->indexed_za
.index
.imm
),
4383 opnd
->indexed_za
.index
.countm1
? ":" : "",
4384 (opnd
->indexed_za
.index
.countm1
4385 ? style_imm (styler
, "%d",
4386 opnd
->indexed_za
.index
.imm
4387 + opnd
->indexed_za
.index
.countm1
)
4389 opnd
->indexed_za
.group_size
? ", " : "",
4390 opnd
->indexed_za
.group_size
== 2
4391 ? style_sub_mnem (styler
, "vgx2")
4392 : opnd
->indexed_za
.group_size
== 4
4393 ? style_sub_mnem (styler
, "vgx4") : "",
4394 opnd
->type
== AARCH64_OPND_SME_ZA_HV_idx_ldstr
? "}" : "");
4397 case AARCH64_OPND_SME_list_of_64bit_tiles
:
4398 print_sme_za_list (buf
, size
, opnd
->imm
.value
, styler
);
4401 case AARCH64_OPND_SME_ZA_array_off1x4
:
4402 case AARCH64_OPND_SME_ZA_array_off2x2
:
4403 case AARCH64_OPND_SME_ZA_array_off2x4
:
4404 case AARCH64_OPND_SME_ZA_array_off3_0
:
4405 case AARCH64_OPND_SME_ZA_array_off3_5
:
4406 case AARCH64_OPND_SME_ZA_array_off3x2
:
4407 case AARCH64_OPND_SME_ZA_array_off4
:
4408 snprintf (buf
, size
, "%s[%s, %s%s%s%s%s]",
4409 style_reg (styler
, "za%s%s",
4410 opnd
->qualifier
== AARCH64_OPND_QLF_NIL
? "" : ".",
4411 (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
4413 : aarch64_get_qualifier_name (opnd
->qualifier
))),
4414 style_reg (styler
, "w%d", opnd
->indexed_za
.index
.regno
),
4415 style_imm (styler
, "%" PRIi64
, opnd
->indexed_za
.index
.imm
),
4416 opnd
->indexed_za
.index
.countm1
? ":" : "",
4417 (opnd
->indexed_za
.index
.countm1
4418 ? style_imm (styler
, "%d",
4419 opnd
->indexed_za
.index
.imm
4420 + opnd
->indexed_za
.index
.countm1
)
4422 opnd
->indexed_za
.group_size
? ", " : "",
4423 opnd
->indexed_za
.group_size
== 2
4424 ? style_sub_mnem (styler
, "vgx2")
4425 : opnd
->indexed_za
.group_size
== 4
4426 ? style_sub_mnem (styler
, "vgx4") : "");
4429 case AARCH64_OPND_SME_ZA_array_vrsb_1
:
4430 case AARCH64_OPND_SME_ZA_array_vrsh_1
:
4431 case AARCH64_OPND_SME_ZA_array_vrss_1
:
4432 case AARCH64_OPND_SME_ZA_array_vrsd_1
:
4433 case AARCH64_OPND_SME_ZA_array_vrsb_2
:
4434 case AARCH64_OPND_SME_ZA_array_vrsh_2
:
4435 case AARCH64_OPND_SME_ZA_array_vrss_2
:
4436 case AARCH64_OPND_SME_ZA_array_vrsd_2
:
4437 case AARCH64_OPND_SME_ZA_ARRAY4
:
4438 snprintf (buf
, size
, "%s [%s, %s%s%s]",
4439 style_reg (styler
, "za%d%c%s%s",
4440 opnd
->indexed_za
.regno
,
4441 opnd
->indexed_za
.v
? 'v': 'h',
4442 opnd
->qualifier
== AARCH64_OPND_QLF_NIL
? "" : ".",
4443 (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
4445 : aarch64_get_qualifier_name (opnd
->qualifier
))),
4446 style_reg (styler
, "w%d", opnd
->indexed_za
.index
.regno
),
4447 style_imm (styler
, "%" PRIi64
, opnd
->indexed_za
.index
.imm
),
4448 opnd
->indexed_za
.index
.countm1
? ":" : "",
4449 opnd
->indexed_za
.index
.countm1
? style_imm (styler
, "%d",
4450 opnd
->indexed_za
.index
.imm
4451 + opnd
->indexed_za
.index
.countm1
):"");
4454 case AARCH64_OPND_SME_SM_ZA
:
4455 snprintf (buf
, size
, "%s",
4456 style_reg (styler
, opnd
->reg
.regno
== 's' ? "sm" : "za"));
4459 case AARCH64_OPND_SME_PnT_Wm_imm
:
4460 snprintf (buf
, size
, "%s[%s, %s]",
4461 style_reg (styler
, "p%d.%s", opnd
->indexed_za
.regno
,
4462 aarch64_get_qualifier_name (opnd
->qualifier
)),
4463 style_reg (styler
, "w%d", opnd
->indexed_za
.index
.regno
),
4464 style_imm (styler
, "%" PRIi64
, opnd
->indexed_za
.index
.imm
));
4467 case AARCH64_OPND_SME_VLxN_10
:
4468 case AARCH64_OPND_SME_VLxN_13
:
4469 enum_value
= opnd
->imm
.value
;
4470 assert (enum_value
< ARRAY_SIZE (aarch64_sme_vlxn_array
));
4471 snprintf (buf
, size
, "%s",
4472 style_sub_mnem (styler
, aarch64_sme_vlxn_array
[enum_value
]));
4475 case AARCH64_OPND_BRBOP
:
4476 enum_value
= opnd
->imm
.value
;
4477 assert (enum_value
< ARRAY_SIZE (aarch64_brbop_array
));
4478 snprintf (buf
, size
, "%s",
4479 style_sub_mnem (styler
, aarch64_brbop_array
[enum_value
]));
4482 case AARCH64_OPND_CRn
:
4483 case AARCH64_OPND_CRm
:
4484 snprintf (buf
, size
, "%s",
4485 style_reg (styler
, "C%" PRIi64
, opnd
->imm
.value
));
4488 case AARCH64_OPND_IDX
:
4489 case AARCH64_OPND_MASK
:
4490 case AARCH64_OPND_IMM
:
4491 case AARCH64_OPND_IMM_2
:
4492 case AARCH64_OPND_WIDTH
:
4493 case AARCH64_OPND_UIMM3_OP1
:
4494 case AARCH64_OPND_UIMM3_OP2
:
4495 case AARCH64_OPND_BIT_NUM
:
4496 case AARCH64_OPND_IMM_VLSL
:
4497 case AARCH64_OPND_IMM_VLSR
:
4498 case AARCH64_OPND_SHLL_IMM
:
4499 case AARCH64_OPND_IMM0
:
4500 case AARCH64_OPND_IMMR
:
4501 case AARCH64_OPND_IMMS
:
4502 case AARCH64_OPND_UNDEFINED
:
4503 case AARCH64_OPND_FBITS
:
4504 case AARCH64_OPND_TME_UIMM16
:
4505 case AARCH64_OPND_SIMM5
:
4506 case AARCH64_OPND_SME_SHRIMM4
:
4507 case AARCH64_OPND_SME_SHRIMM5
:
4508 case AARCH64_OPND_SVE_SHLIMM_PRED
:
4509 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
4510 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22
:
4511 case AARCH64_OPND_SVE_SHRIMM_PRED
:
4512 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
4513 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22
:
4514 case AARCH64_OPND_SVE_SIMM5
:
4515 case AARCH64_OPND_SVE_SIMM5B
:
4516 case AARCH64_OPND_SVE_SIMM6
:
4517 case AARCH64_OPND_SVE_SIMM8
:
4518 case AARCH64_OPND_SVE_UIMM3
:
4519 case AARCH64_OPND_SVE_UIMM7
:
4520 case AARCH64_OPND_SVE_UIMM8
:
4521 case AARCH64_OPND_SVE_UIMM4
:
4522 case AARCH64_OPND_SVE_UIMM8_53
:
4523 case AARCH64_OPND_IMM_ROT1
:
4524 case AARCH64_OPND_IMM_ROT2
:
4525 case AARCH64_OPND_IMM_ROT3
:
4526 case AARCH64_OPND_SVE_IMM_ROT1
:
4527 case AARCH64_OPND_SVE_IMM_ROT2
:
4528 case AARCH64_OPND_SVE_IMM_ROT3
:
4529 case AARCH64_OPND_CSSC_SIMM8
:
4530 case AARCH64_OPND_CSSC_UIMM8
:
4531 snprintf (buf
, size
, "%s",
4532 style_imm (styler
, "#%" PRIi64
, opnd
->imm
.value
));
4535 case AARCH64_OPND_SVE_I1_HALF_ONE
:
4536 case AARCH64_OPND_SVE_I1_HALF_TWO
:
4537 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
4540 c
.i
= opnd
->imm
.value
;
4541 snprintf (buf
, size
, "%s", style_imm (styler
, "#%.1f", c
.f
));
4545 case AARCH64_OPND_SVE_PATTERN
:
4546 if (optional_operand_p (opcode
, idx
)
4547 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
4549 enum_value
= opnd
->imm
.value
;
4550 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
4551 if (aarch64_sve_pattern_array
[enum_value
])
4552 snprintf (buf
, size
, "%s",
4553 style_reg (styler
, aarch64_sve_pattern_array
[enum_value
]));
4555 snprintf (buf
, size
, "%s",
4556 style_imm (styler
, "#%" PRIi64
, opnd
->imm
.value
));
4559 case AARCH64_OPND_SVE_PATTERN_SCALED
:
4560 if (optional_operand_p (opcode
, idx
)
4561 && !opnd
->shifter
.operator_present
4562 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
4564 enum_value
= opnd
->imm
.value
;
4565 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
4566 if (aarch64_sve_pattern_array
[opnd
->imm
.value
])
4567 snprintf (buf
, size
, "%s",
4569 aarch64_sve_pattern_array
[opnd
->imm
.value
]));
4571 snprintf (buf
, size
, "%s",
4572 style_imm (styler
, "#%" PRIi64
, opnd
->imm
.value
));
4573 if (opnd
->shifter
.operator_present
)
4575 size_t len
= strlen (buf
);
4576 const char *shift_name
4577 = aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
;
4578 snprintf (buf
+ len
, size
- len
, ", %s %s",
4579 style_sub_mnem (styler
, shift_name
),
4580 style_imm (styler
, "#%" PRIi64
, opnd
->shifter
.amount
));
4584 case AARCH64_OPND_SVE_PRFOP
:
4585 enum_value
= opnd
->imm
.value
;
4586 assert (enum_value
< ARRAY_SIZE (aarch64_sve_prfop_array
));
4587 if (aarch64_sve_prfop_array
[enum_value
])
4588 snprintf (buf
, size
, "%s",
4589 style_reg (styler
, aarch64_sve_prfop_array
[enum_value
]));
4591 snprintf (buf
, size
, "%s",
4592 style_imm (styler
, "#%" PRIi64
, opnd
->imm
.value
));
4595 case AARCH64_OPND_IMM_MOV
:
4596 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
4598 case 4: /* e.g. MOV Wd, #<imm32>. */
4600 int imm32
= opnd
->imm
.value
;
4601 snprintf (buf
, size
, "%s",
4602 style_imm (styler
, "#0x%-20x", imm32
));
4603 snprintf (comment
, comment_size
, "#%d", imm32
);
4606 case 8: /* e.g. MOV Xd, #<imm64>. */
4607 snprintf (buf
, size
, "%s", style_imm (styler
, "#0x%-20" PRIx64
,
4609 snprintf (comment
, comment_size
, "#%" PRIi64
, opnd
->imm
.value
);
4612 snprintf (buf
, size
, "<invalid>");
4617 case AARCH64_OPND_FPIMM0
:
4618 snprintf (buf
, size
, "%s", style_imm (styler
, "#0.0"));
4621 case AARCH64_OPND_LIMM
:
4622 case AARCH64_OPND_AIMM
:
4623 case AARCH64_OPND_HALF
:
4624 case AARCH64_OPND_SVE_INV_LIMM
:
4625 case AARCH64_OPND_SVE_LIMM
:
4626 case AARCH64_OPND_SVE_LIMM_MOV
:
4627 if (opnd
->shifter
.amount
)
4628 snprintf (buf
, size
, "%s, %s %s",
4629 style_imm (styler
, "#0x%" PRIx64
, opnd
->imm
.value
),
4630 style_sub_mnem (styler
, "lsl"),
4631 style_imm (styler
, "#%" PRIi64
, opnd
->shifter
.amount
));
4633 snprintf (buf
, size
, "%s",
4634 style_imm (styler
, "#0x%" PRIx64
, opnd
->imm
.value
));
4637 case AARCH64_OPND_SIMD_IMM
:
4638 case AARCH64_OPND_SIMD_IMM_SFT
:
4639 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
4640 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
4641 snprintf (buf
, size
, "%s",
4642 style_imm (styler
, "#0x%" PRIx64
, opnd
->imm
.value
));
4644 snprintf (buf
, size
, "%s, %s %s",
4645 style_imm (styler
, "#0x%" PRIx64
, opnd
->imm
.value
),
4646 style_sub_mnem (styler
, aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
),
4647 style_imm (styler
, "#%" PRIi64
, opnd
->shifter
.amount
));
4650 case AARCH64_OPND_SVE_AIMM
:
4651 case AARCH64_OPND_SVE_ASIMM
:
4652 if (opnd
->shifter
.amount
)
4653 snprintf (buf
, size
, "%s, %s %s",
4654 style_imm (styler
, "#%" PRIi64
, opnd
->imm
.value
),
4655 style_sub_mnem (styler
, "lsl"),
4656 style_imm (styler
, "#%" PRIi64
, opnd
->shifter
.amount
));
4658 snprintf (buf
, size
, "%s",
4659 style_imm (styler
, "#%" PRIi64
, opnd
->imm
.value
));
4662 case AARCH64_OPND_FPIMM
:
4663 case AARCH64_OPND_SIMD_FPIMM
:
4664 case AARCH64_OPND_SVE_FPIMM8
:
4665 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
4667 case 2: /* e.g. FMOV <Hd>, #<imm>. */
4670 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
4671 snprintf (buf
, size
, "%s", style_imm (styler
, "#%.18e", c
.f
));
4674 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
4677 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
4678 snprintf (buf
, size
, "%s", style_imm (styler
, "#%.18e", c
.f
));
4681 case 8: /* e.g. FMOV <Sd>, #<imm>. */
4684 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
4685 snprintf (buf
, size
, "%s", style_imm (styler
, "#%.18e", c
.d
));
4689 snprintf (buf
, size
, "<invalid>");
4694 case AARCH64_OPND_CCMP_IMM
:
4695 case AARCH64_OPND_NZCV
:
4696 case AARCH64_OPND_EXCEPTION
:
4697 case AARCH64_OPND_UIMM4
:
4698 case AARCH64_OPND_UIMM4_ADDG
:
4699 case AARCH64_OPND_UIMM7
:
4700 case AARCH64_OPND_UIMM10
:
4701 if (optional_operand_p (opcode
, idx
)
4702 && (opnd
->imm
.value
==
4703 (int64_t) get_optional_operand_default_value (opcode
)))
4704 /* Omit the operand, e.g. DCPS1. */
4706 snprintf (buf
, size
, "%s",
4707 style_imm (styler
, "#0x%x", (unsigned int) opnd
->imm
.value
));
4710 case AARCH64_OPND_COND
:
4711 case AARCH64_OPND_COND1
:
4712 snprintf (buf
, size
, "%s",
4713 style_sub_mnem (styler
, opnd
->cond
->names
[0]));
4714 num_conds
= ARRAY_SIZE (opnd
->cond
->names
);
4715 for (i
= 1; i
< num_conds
&& opnd
->cond
->names
[i
]; ++i
)
4717 size_t len
= comment
!= NULL
? strlen (comment
) : 0;
4719 snprintf (comment
+ len
, comment_size
- len
, "%s = %s",
4720 opnd
->cond
->names
[0], opnd
->cond
->names
[i
]);
4722 snprintf (comment
+ len
, comment_size
- len
, ", %s",
4723 opnd
->cond
->names
[i
]);
4727 case AARCH64_OPND_ADDR_ADRP
:
4728 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
4734 /* This is not necessary during the disassembling, as print_address_func
4735 in the disassemble_info will take care of the printing. But some
4736 other callers may be still interested in getting the string in *STR,
4737 so here we do snprintf regardless. */
4738 snprintf (buf
, size
, "%s", style_addr (styler
, "#0x%" PRIx64
, addr
));
4741 case AARCH64_OPND_ADDR_PCREL14
:
4742 case AARCH64_OPND_ADDR_PCREL19
:
4743 case AARCH64_OPND_ADDR_PCREL21
:
4744 case AARCH64_OPND_ADDR_PCREL26
:
4745 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
4750 /* This is not necessary during the disassembling, as print_address_func
4751 in the disassemble_info will take care of the printing. But some
4752 other callers may be still interested in getting the string in *STR,
4753 so here we do snprintf regardless. */
4754 snprintf (buf
, size
, "%s", style_addr (styler
, "#0x%" PRIx64
, addr
));
4757 case AARCH64_OPND_ADDR_SIMPLE
:
4758 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
4759 case AARCH64_OPND_SIMD_ADDR_POST
:
4760 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
4761 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
4763 if (opnd
->addr
.offset
.is_reg
)
4764 snprintf (buf
, size
, "[%s], %s",
4765 style_reg (styler
, name
),
4766 style_reg (styler
, "x%d", opnd
->addr
.offset
.regno
));
4768 snprintf (buf
, size
, "[%s], %s",
4769 style_reg (styler
, name
),
4770 style_imm (styler
, "#%d", opnd
->addr
.offset
.imm
));
4773 snprintf (buf
, size
, "[%s]", style_reg (styler
, name
));
4776 case AARCH64_OPND_ADDR_REGOFF
:
4777 case AARCH64_OPND_SVE_ADDR_R
:
4778 case AARCH64_OPND_SVE_ADDR_RR
:
4779 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
4780 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
4781 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
4782 case AARCH64_OPND_SVE_ADDR_RR_LSL4
:
4783 case AARCH64_OPND_SVE_ADDR_RX
:
4784 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
4785 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
4786 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
4787 case AARCH64_OPND_SVE_ADDR_RX_LSL4
:
4788 print_register_offset_address
4789 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
4790 get_offset_int_reg_name (opnd
), styler
);
4793 case AARCH64_OPND_SVE_ADDR_ZX
:
4794 print_register_offset_address
4796 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
4797 get_64bit_int_reg_name (opnd
->addr
.offset
.regno
, 0), styler
);
4800 case AARCH64_OPND_SVE_ADDR_RZ
:
4801 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
4802 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
4803 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
4804 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
4805 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
4806 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
4807 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
4808 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
4809 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
4810 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
4811 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
4812 print_register_offset_address
4813 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
4814 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
),
4818 case AARCH64_OPND_ADDR_SIMM7
:
4819 case AARCH64_OPND_ADDR_SIMM9
:
4820 case AARCH64_OPND_ADDR_SIMM9_2
:
4821 case AARCH64_OPND_ADDR_SIMM10
:
4822 case AARCH64_OPND_ADDR_SIMM11
:
4823 case AARCH64_OPND_ADDR_SIMM13
:
4824 case AARCH64_OPND_RCPC3_ADDR_OFFSET
:
4825 case AARCH64_OPND_ADDR_OFFSET
:
4826 case AARCH64_OPND_RCPC3_ADDR_OPT_POSTIND
:
4827 case AARCH64_OPND_RCPC3_ADDR_OPT_PREIND_WB
:
4828 case AARCH64_OPND_RCPC3_ADDR_POSTIND
:
4829 case AARCH64_OPND_RCPC3_ADDR_PREIND_WB
:
4830 case AARCH64_OPND_SME_ADDR_RI_U4xVL
:
4831 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
4832 case AARCH64_OPND_SVE_ADDR_RI_S4x32
:
4833 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
4834 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
4835 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
4836 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
4837 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
4838 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
4839 case AARCH64_OPND_SVE_ADDR_RI_U6
:
4840 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
4841 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
4842 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
4843 print_immediate_offset_address
4844 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
4848 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
4849 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
4850 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
4851 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
4852 print_immediate_offset_address
4854 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
4858 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
4859 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
4860 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
4861 print_register_offset_address
4863 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
4864 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
),
4868 case AARCH64_OPND_ADDR_UIMM12
:
4869 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
4870 if (opnd
->addr
.offset
.imm
)
4871 snprintf (buf
, size
, "[%s, %s]",
4872 style_reg (styler
, name
),
4873 style_imm (styler
, "#%d", opnd
->addr
.offset
.imm
));
4875 snprintf (buf
, size
, "[%s]", style_reg (styler
, name
));
4878 case AARCH64_OPND_SYSREG
:
4879 case AARCH64_OPND_SYSREG128
:
4880 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
4882 const aarch64_sys_reg
*sr
= aarch64_sys_regs
+ i
;
4885 = (!(sr
->flags
& (F_REG_READ
| F_REG_WRITE
))
4886 || (sr
->flags
& opnd
->sysreg
.flags
) == opnd
->sysreg
.flags
)
4887 && AARCH64_CPU_HAS_ALL_FEATURES (features
, sr
->features
);
4889 /* Try and find an exact match, But if that fails, return the first
4890 partial match that was found. */
4891 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
.value
4892 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs
[i
].flags
)
4893 && ! aarch64_sys_reg_alias_p (aarch64_sys_regs
[i
].flags
)
4894 && (name
== NULL
|| exact_match
))
4896 name
= aarch64_sys_regs
[i
].name
;
4904 /* If we didn't match exactly, that means the presense of a flag
4905 indicates what we didn't want for this instruction. e.g. If
4906 F_REG_READ is there, that means we were looking for a write
4907 register. See aarch64_ext_sysreg. */
4908 if (aarch64_sys_regs
[i
].flags
& F_REG_WRITE
)
4909 *notes
= _("reading from a write-only register");
4910 else if (aarch64_sys_regs
[i
].flags
& F_REG_READ
)
4911 *notes
= _("writing to a read-only register");
4916 snprintf (buf
, size
, "%s", style_reg (styler
, name
));
4919 /* Implementation defined system register. */
4920 unsigned int value
= opnd
->sysreg
.value
;
4921 snprintf (buf
, size
, "%s",
4922 style_reg (styler
, "s%u_%u_c%u_c%u_%u",
4923 (value
>> 14) & 0x3, (value
>> 11) & 0x7,
4924 (value
>> 7) & 0xf, (value
>> 3) & 0xf,
4929 case AARCH64_OPND_PSTATEFIELD
:
4930 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
4931 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
4933 /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
4934 SVCRZA and SVCRSMZA. */
4935 uint32_t flags
= aarch64_pstatefields
[i
].flags
;
4936 if (flags
& F_REG_IN_CRM
4937 && (PSTATE_DECODE_CRM (opnd
->sysreg
.flags
)
4938 != PSTATE_DECODE_CRM (flags
)))
4942 assert (aarch64_pstatefields
[i
].name
);
4943 snprintf (buf
, size
, "%s",
4944 style_reg (styler
, aarch64_pstatefields
[i
].name
));
4947 case AARCH64_OPND_SYSREG_AT
:
4948 case AARCH64_OPND_SYSREG_DC
:
4949 case AARCH64_OPND_SYSREG_IC
:
4950 case AARCH64_OPND_SYSREG_TLBI
:
4951 case AARCH64_OPND_SYSREG_TLBIP
:
4952 case AARCH64_OPND_SYSREG_SR
:
4953 snprintf (buf
, size
, "%s", style_reg (styler
, opnd
->sysins_op
->name
));
4956 case AARCH64_OPND_BARRIER
:
4957 case AARCH64_OPND_BARRIER_DSB_NXS
:
4959 if (opnd
->barrier
->name
[0] == '#')
4960 snprintf (buf
, size
, "%s", style_imm (styler
, opnd
->barrier
->name
));
4962 snprintf (buf
, size
, "%s",
4963 style_sub_mnem (styler
, opnd
->barrier
->name
));
4967 case AARCH64_OPND_BARRIER_ISB
:
4968 /* Operand can be omitted, e.g. in DCPS1. */
4969 if (! optional_operand_p (opcode
, idx
)
4970 || (opnd
->barrier
->value
4971 != get_optional_operand_default_value (opcode
)))
4972 snprintf (buf
, size
, "%s",
4973 style_imm (styler
, "#0x%x", opnd
->barrier
->value
));
4976 case AARCH64_OPND_PRFOP
:
4977 if (opnd
->prfop
->name
!= NULL
)
4978 snprintf (buf
, size
, "%s", style_sub_mnem (styler
, opnd
->prfop
->name
));
4980 snprintf (buf
, size
, "%s", style_imm (styler
, "#0x%02x",
4981 opnd
->prfop
->value
));
4984 case AARCH64_OPND_RPRFMOP
:
4985 enum_value
= opnd
->imm
.value
;
4986 if (enum_value
< ARRAY_SIZE (aarch64_rprfmop_array
)
4987 && aarch64_rprfmop_array
[enum_value
])
4988 snprintf (buf
, size
, "%s",
4989 style_reg (styler
, aarch64_rprfmop_array
[enum_value
]));
4991 snprintf (buf
, size
, "%s",
4992 style_imm (styler
, "#%" PRIi64
, opnd
->imm
.value
));
4995 case AARCH64_OPND_BARRIER_PSB
:
4996 snprintf (buf
, size
, "%s", style_sub_mnem (styler
, "csync"));
4999 case AARCH64_OPND_X16
:
5000 snprintf (buf
, size
, "%s", style_reg (styler
, "x16"));
5003 case AARCH64_OPND_SME_ZT0
:
5004 snprintf (buf
, size
, "%s", style_reg (styler
, "zt0"));
5007 case AARCH64_OPND_SME_ZT0_INDEX
:
5008 snprintf (buf
, size
, "%s[%s]", style_reg (styler
, "zt0"),
5009 style_imm (styler
, "%d", (int) opnd
->imm
.value
));
5011 case AARCH64_OPND_SME_ZT0_INDEX2_12
:
5012 snprintf (buf
, size
, "%s[%s, %s]", style_reg (styler
, "zt0"),
5013 style_imm (styler
, "%d", (int) opnd
->imm
.value
),
5014 style_sub_mnem (styler
, "mul vl"));
5017 case AARCH64_OPND_SME_ZT0_LIST
:
5018 snprintf (buf
, size
, "{%s}", style_reg (styler
, "zt0"));
5021 case AARCH64_OPND_BARRIER_GCSB
:
5022 snprintf (buf
, size
, "%s", style_sub_mnem (styler
, "dsync"));
5025 case AARCH64_OPND_BTI_TARGET
:
5026 if ((HINT_FLAG (opnd
->hint_option
->value
) & HINT_OPD_F_NOPRINT
) == 0)
5027 snprintf (buf
, size
, "%s",
5028 style_sub_mnem (styler
, opnd
->hint_option
->name
));
5031 case AARCH64_OPND_MOPS_ADDR_Rd
:
5032 case AARCH64_OPND_MOPS_ADDR_Rs
:
5033 snprintf (buf
, size
, "[%s]!",
5035 get_int_reg_name (opnd
->reg
.regno
,
5036 AARCH64_OPND_QLF_X
, 0)));
5039 case AARCH64_OPND_MOPS_WB_Rn
:
5040 snprintf (buf
, size
, "%s!",
5041 style_reg (styler
, get_int_reg_name (opnd
->reg
.regno
,
5042 AARCH64_OPND_QLF_X
, 0)));
5046 snprintf (buf
, size
, "<invalid>");
/* Pack a system-register (op0,op1,CRn,CRm,op2) tuple into the single
   integer encoding used by the register tables below.  op2 occupies the
   low bits after the final right-shift by 5.  */
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
/* For 3.9.3 Instructions for Accessing Special Purpose Registers.  */
#define CPEN_(op1,crm,op2) CPENC (3, (op1), 4, (crm), (op2))
/* For 3.9.10 System Instructions.  */
#define CPENS(op1,crn,crm,op2) CPENC (1, (op1), (crn), (crm), (op2))
5075 /* TODO there is one more issues need to be resolved
5076 1. handle cpu-implementation-defined system registers.
5078 Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
5079 respectively. If neither of these are set then the register is read-write. */
5080 const aarch64_sys_reg aarch64_sys_regs
[] =
5082 #define SYSREG(name, encoding, flags, features) \
5083 { name, encoding, flags, features },
5084 #include "aarch64-sys-regs.def"
5085 { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES
}
5090 aarch64_sys_reg_deprecated_p (const uint32_t reg_flags
)
5092 return (reg_flags
& F_DEPRECATED
) != 0;
5096 aarch64_sys_reg_128bit_p (const uint32_t reg_flags
)
5098 return (reg_flags
& F_REG_128
) != 0;
5102 aarch64_sys_reg_alias_p (const uint32_t reg_flags
)
5104 return (reg_flags
& F_REG_ALIAS
) != 0;
5107 /* The CPENC below is fairly misleading, the fields
5108 here are not in CPENC form. They are in op2op1 form. The fields are encoded
5109 by ins_pstatefield, which just shifts the value by the width of the fields
5110 in a loop. So if you CPENC them only the first value will be set, the rest
5111 are masked out to 0. As an example. op2 = 3, op1=2. CPENC would produce a
5112 value of 0b110000000001000000 (0x30040) while what you want is
5114 const aarch64_sys_reg aarch64_pstatefields
[] =
5116 { "spsel", 0x05, F_REG_MAX_VALUE (1), AARCH64_NO_FEATURES
},
5117 { "daifset", 0x1e, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES
},
5118 { "daifclr", 0x1f, F_REG_MAX_VALUE (15), AARCH64_NO_FEATURES
},
5119 { "pan", 0x04, F_REG_MAX_VALUE (1) | F_ARCHEXT
, AARCH64_FEATURE (PAN
) },
5120 { "uao", 0x03, F_REG_MAX_VALUE (1) | F_ARCHEXT
, AARCH64_FEATURE (V8_2A
) },
5121 { "ssbs", 0x19, F_REG_MAX_VALUE (1) | F_ARCHEXT
, AARCH64_FEATURE (SSBS
) },
5122 { "dit", 0x1a, F_REG_MAX_VALUE (1) | F_ARCHEXT
, AARCH64_FEATURE (V8_4A
) },
5123 { "tco", 0x1c, F_REG_MAX_VALUE (1) | F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5124 { "svcrsm", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x2,0x1) | F_REG_MAX_VALUE (1)
5125 | F_ARCHEXT
, AARCH64_FEATURE (SME
) },
5126 { "svcrza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x4,0x1) | F_REG_MAX_VALUE (1)
5127 | F_ARCHEXT
, AARCH64_FEATURE (SME
) },
5128 { "svcrsmza", 0x1b, PSTATE_ENCODE_CRM_AND_IMM (0x6,0x1) | F_REG_MAX_VALUE (1)
5129 | F_ARCHEXT
, AARCH64_FEATURE (SME
) },
5130 { "allint", 0x08, F_REG_MAX_VALUE (1) | F_ARCHEXT
, AARCH64_FEATURE (V8_8A
) },
5131 { 0, CPENC (0,0,0,0,0), 0, AARCH64_NO_FEATURES
},
5135 aarch64_pstatefield_supported_p (const aarch64_feature_set features
,
5136 const aarch64_sys_reg
*reg
)
5138 if (!(reg
->flags
& F_ARCHEXT
))
5141 return AARCH64_CPU_HAS_ALL_FEATURES (features
, reg
->features
);
5144 const aarch64_sys_ins_reg aarch64_sys_regs_ic
[] =
5146 { "ialluis", CPENS(0,C7
,C1
,0), 0, AARCH64_NO_FEATURES
},
5147 { "iallu", CPENS(0,C7
,C5
,0), 0, AARCH64_NO_FEATURES
},
5148 { "ivau", CPENS (3, C7
, C5
, 1), F_HASXT
, AARCH64_NO_FEATURES
},
5149 { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES
}
5152 const aarch64_sys_ins_reg aarch64_sys_regs_dc
[] =
5154 { "zva", CPENS (3, C7
, C4
, 1), F_HASXT
, AARCH64_NO_FEATURES
},
5155 { "gva", CPENS (3, C7
, C4
, 3), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5156 { "gzva", CPENS (3, C7
, C4
, 4), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5157 { "ivac", CPENS (0, C7
, C6
, 1), F_HASXT
, AARCH64_NO_FEATURES
},
5158 { "igvac", CPENS (0, C7
, C6
, 3), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5159 { "igsw", CPENS (0, C7
, C6
, 4), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5160 { "isw", CPENS (0, C7
, C6
, 2), F_HASXT
, AARCH64_NO_FEATURES
},
5161 { "igdvac", CPENS (0, C7
, C6
, 5), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5162 { "igdsw", CPENS (0, C7
, C6
, 6), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5163 { "cvac", CPENS (3, C7
, C10
, 1), F_HASXT
, AARCH64_NO_FEATURES
},
5164 { "cgvac", CPENS (3, C7
, C10
, 3), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5165 { "cgdvac", CPENS (3, C7
, C10
, 5), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5166 { "csw", CPENS (0, C7
, C10
, 2), F_HASXT
, AARCH64_NO_FEATURES
},
5167 { "cgsw", CPENS (0, C7
, C10
, 4), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5168 { "cgdsw", CPENS (0, C7
, C10
, 6), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5169 { "cvau", CPENS (3, C7
, C11
, 1), F_HASXT
, AARCH64_NO_FEATURES
},
5170 { "cvap", CPENS (3, C7
, C12
, 1), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (V8_2A
) },
5171 { "cgvap", CPENS (3, C7
, C12
, 3), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5172 { "cgdvap", CPENS (3, C7
, C12
, 5), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5173 { "cvadp", CPENS (3, C7
, C13
, 1), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (CVADP
) },
5174 { "cgvadp", CPENS (3, C7
, C13
, 3), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5175 { "cgdvadp", CPENS (3, C7
, C13
, 5), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5176 { "civac", CPENS (3, C7
, C14
, 1), F_HASXT
, AARCH64_NO_FEATURES
},
5177 { "cigvac", CPENS (3, C7
, C14
, 3), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5178 { "cigdvac", CPENS (3, C7
, C14
, 5), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5179 { "cisw", CPENS (0, C7
, C14
, 2), F_HASXT
, AARCH64_NO_FEATURES
},
5180 { "cigsw", CPENS (0, C7
, C14
, 4), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5181 { "cigdsw", CPENS (0, C7
, C14
, 6), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (MEMTAG
) },
5182 { "cipapa", CPENS (6, C7
, C14
, 1), F_HASXT
, AARCH64_NO_FEATURES
},
5183 { "cigdpapa", CPENS (6, C7
, C14
, 5), F_HASXT
, AARCH64_NO_FEATURES
},
5184 { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES
}
5187 const aarch64_sys_ins_reg aarch64_sys_regs_at
[] =
5189 { "s1e1r", CPENS (0, C7
, C8
, 0), F_HASXT
, AARCH64_NO_FEATURES
},
5190 { "s1e1w", CPENS (0, C7
, C8
, 1), F_HASXT
, AARCH64_NO_FEATURES
},
5191 { "s1e0r", CPENS (0, C7
, C8
, 2), F_HASXT
, AARCH64_NO_FEATURES
},
5192 { "s1e0w", CPENS (0, C7
, C8
, 3), F_HASXT
, AARCH64_NO_FEATURES
},
5193 { "s12e1r", CPENS (4, C7
, C8
, 4), F_HASXT
, AARCH64_NO_FEATURES
},
5194 { "s12e1w", CPENS (4, C7
, C8
, 5), F_HASXT
, AARCH64_NO_FEATURES
},
5195 { "s12e0r", CPENS (4, C7
, C8
, 6), F_HASXT
, AARCH64_NO_FEATURES
},
5196 { "s12e0w", CPENS (4, C7
, C8
, 7), F_HASXT
, AARCH64_NO_FEATURES
},
5197 { "s1e2r", CPENS (4, C7
, C8
, 0), F_HASXT
, AARCH64_NO_FEATURES
},
5198 { "s1e2w", CPENS (4, C7
, C8
, 1), F_HASXT
, AARCH64_NO_FEATURES
},
5199 { "s1e3r", CPENS (6, C7
, C8
, 0), F_HASXT
, AARCH64_NO_FEATURES
},
5200 { "s1e3w", CPENS (6, C7
, C8
, 1), F_HASXT
, AARCH64_NO_FEATURES
},
5201 { "s1e1rp", CPENS (0, C7
, C9
, 0), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (V8_2A
) },
5202 { "s1e1wp", CPENS (0, C7
, C9
, 1), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (V8_2A
) },
5203 { "s1e1a", CPENS (0, C7
, C9
, 2), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (ATS1A
) },
5204 { "s1e2a", CPENS (4, C7
, C9
, 2), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (ATS1A
) },
5205 { "s1e3a", CPENS (6, C7
, C9
, 2), F_HASXT
| F_ARCHEXT
, AARCH64_FEATURE (ATS1A
) },
5206 { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES
}
5209 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi
[] =
5211 { "rpaos", CPENS (6, C8
, C4
, 3), F_HASXT
, AARCH64_NO_FEATURES
},
5212 { "rpalos", CPENS (6, C8
, C4
, 7), F_HASXT
, AARCH64_NO_FEATURES
},
5213 { "paallos", CPENS (6, C8
, C1
, 4), 0, AARCH64_NO_FEATURES
},
5214 { "paall", CPENS (6, C8
, C7
, 4), 0, AARCH64_NO_FEATURES
},
5216 #define TLBI_XS_OP(OP, CODE, FLAGS) \
5217 { OP, CODE, FLAGS, AARCH64_NO_FEATURES }, \
5218 { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS | F_ARCHEXT, AARCH64_FEATURE (XS) },
5220 TLBI_XS_OP ( "vmalle1", CPENS (0, C8
, C7
, 0), 0)
5221 TLBI_XS_OP ( "vae1", CPENS (0, C8
, C7
, 1), F_HASXT
| F_REG_128
)
5222 TLBI_XS_OP ( "aside1", CPENS (0, C8
, C7
, 2), F_HASXT
)
5223 TLBI_XS_OP ( "vaae1", CPENS (0, C8
, C7
, 3), F_HASXT
| F_REG_128
)
5224 TLBI_XS_OP ( "vmalle1is", CPENS (0, C8
, C3
, 0), 0)
5225 TLBI_XS_OP ( "vae1is", CPENS (0, C8
, C3
, 1), F_HASXT
| F_REG_128
)
5226 TLBI_XS_OP ( "aside1is", CPENS (0, C8
, C3
, 2), F_HASXT
)
5227 TLBI_XS_OP ( "vaae1is", CPENS (0, C8
, C3
, 3), F_HASXT
| F_REG_128
)
5228 TLBI_XS_OP ( "ipas2e1is", CPENS (4, C8
, C0
, 1), F_HASXT
| F_REG_128
)
5229 TLBI_XS_OP ( "ipas2le1is",CPENS (4, C8
, C0
, 5), F_HASXT
| F_REG_128
)
5230 TLBI_XS_OP ( "ipas2e1", CPENS (4, C8
, C4
, 1), F_HASXT
| F_REG_128
)
5231 TLBI_XS_OP ( "ipas2le1", CPENS (4, C8
, C4
, 5), F_HASXT
| F_REG_128
)
5232 TLBI_XS_OP ( "vae2", CPENS (4, C8
, C7
, 1), F_HASXT
| F_REG_128
)
5233 TLBI_XS_OP ( "vae2is", CPENS (4, C8
, C3
, 1), F_HASXT
| F_REG_128
)
5234 TLBI_XS_OP ( "vmalls12e1",CPENS (4, C8
, C7
, 6), 0)
5235 TLBI_XS_OP ( "vmalls12e1is",CPENS(4,C8
, C3
, 6), 0)
5236 TLBI_XS_OP ( "vae3", CPENS (6, C8
, C7
, 1), F_HASXT
| F_REG_128
)
5237 TLBI_XS_OP ( "vae3is", CPENS (6, C8
, C3
, 1), F_HASXT
| F_REG_128
)
5238 TLBI_XS_OP ( "alle2", CPENS (4, C8
, C7
, 0), 0)
5239 TLBI_XS_OP ( "alle2is", CPENS (4, C8
, C3
, 0), 0)
5240 TLBI_XS_OP ( "alle1", CPENS (4, C8
, C7
, 4), 0)
5241 TLBI_XS_OP ( "alle1is", CPENS (4, C8
, C3
, 4), 0)
5242 TLBI_XS_OP ( "alle3", CPENS (6, C8
, C7
, 0), 0)
5243 TLBI_XS_OP ( "alle3is", CPENS (6, C8
, C3
, 0), 0)
5244 TLBI_XS_OP ( "vale1is", CPENS (0, C8
, C3
, 5), F_HASXT
| F_REG_128
)
5245 TLBI_XS_OP ( "vale2is", CPENS (4, C8
, C3
, 5), F_HASXT
| F_REG_128
)
5246 TLBI_XS_OP ( "vale3is", CPENS (6, C8
, C3
, 5), F_HASXT
| F_REG_128
)
5247 TLBI_XS_OP ( "vaale1is", CPENS (0, C8
, C3
, 7), F_HASXT
| F_REG_128
)
5248 TLBI_XS_OP ( "vale1", CPENS (0, C8
, C7
, 5), F_HASXT
| F_REG_128
)
5249 TLBI_XS_OP ( "vale2", CPENS (4, C8
, C7
, 5), F_HASXT
| F_REG_128
)
5250 TLBI_XS_OP ( "vale3", CPENS (6, C8
, C7
, 5), F_HASXT
| F_REG_128
)
5251 TLBI_XS_OP ( "vaale1", CPENS (0, C8
, C7
, 7), F_HASXT
| F_REG_128
)
5254 #define TLBI_XS_OP(OP, CODE, FLAGS) \
5255 { OP, CODE, FLAGS | F_ARCHEXT, AARCH64_FEATURE (V8_4A) }, \
5256 { OP "nxs", CODE | CPENS (0, C9, 0, 0), FLAGS | F_ARCHEXT, AARCH64_FEATURE (XS) },
5258 TLBI_XS_OP ( "vmalle1os", CPENS (0, C8
, C1
, 0), 0 )
5259 TLBI_XS_OP ( "vae1os", CPENS (0, C8
, C1
, 1), F_HASXT
| F_REG_128
)
5260 TLBI_XS_OP ( "aside1os", CPENS (0, C8
, C1
, 2), F_HASXT
)
5261 TLBI_XS_OP ( "vaae1os", CPENS (0, C8
, C1
, 3), F_HASXT
| F_REG_128
)
5262 TLBI_XS_OP ( "vale1os", CPENS (0, C8
, C1
, 5), F_HASXT
| F_REG_128
)
5263 TLBI_XS_OP ( "vaale1os", CPENS (0, C8
, C1
, 7), F_HASXT
| F_REG_128
)
5264 TLBI_XS_OP ( "ipas2e1os", CPENS (4, C8
, C4
, 0), F_HASXT
| F_REG_128
)
5265 TLBI_XS_OP ( "ipas2le1os", CPENS (4, C8
, C4
, 4), F_HASXT
| F_REG_128
)
5266 TLBI_XS_OP ( "vae2os", CPENS (4, C8
, C1
, 1), F_HASXT
| F_REG_128
)
5267 TLBI_XS_OP ( "vale2os", CPENS (4, C8
, C1
, 5), F_HASXT
| F_REG_128
)
5268 TLBI_XS_OP ( "vmalls12e1os", CPENS (4, C8
, C1
, 6), 0 )
5269 TLBI_XS_OP ( "vae3os", CPENS (6, C8
, C1
, 1), F_HASXT
| F_REG_128
)
5270 TLBI_XS_OP ( "vale3os", CPENS (6, C8
, C1
, 5), F_HASXT
| F_REG_128
)
5271 TLBI_XS_OP ( "alle2os", CPENS (4, C8
, C1
, 0), 0 )
5272 TLBI_XS_OP ( "alle1os", CPENS (4, C8
, C1
, 4), 0 )
5273 TLBI_XS_OP ( "alle3os", CPENS (6, C8
, C1
, 0), 0 )
5275 TLBI_XS_OP ( "rvae1", CPENS (0, C8
, C6
, 1), F_HASXT
| F_REG_128
)
5276 TLBI_XS_OP ( "rvaae1", CPENS (0, C8
, C6
, 3), F_HASXT
| F_REG_128
)
5277 TLBI_XS_OP ( "rvale1", CPENS (0, C8
, C6
, 5), F_HASXT
| F_REG_128
)
5278 TLBI_XS_OP ( "rvaale1", CPENS (0, C8
, C6
, 7), F_HASXT
| F_REG_128
)
5279 TLBI_XS_OP ( "rvae1is", CPENS (0, C8
, C2
, 1), F_HASXT
| F_REG_128
)
5280 TLBI_XS_OP ( "rvaae1is", CPENS (0, C8
, C2
, 3), F_HASXT
| F_REG_128
)
5281 TLBI_XS_OP ( "rvale1is", CPENS (0, C8
, C2
, 5), F_HASXT
| F_REG_128
)
5282 TLBI_XS_OP ( "rvaale1is", CPENS (0, C8
, C2
, 7), F_HASXT
| F_REG_128
)
5283 TLBI_XS_OP ( "rvae1os", CPENS (0, C8
, C5
, 1), F_HASXT
| F_REG_128
)
5284 TLBI_XS_OP ( "rvaae1os", CPENS (0, C8
, C5
, 3), F_HASXT
| F_REG_128
)
5285 TLBI_XS_OP ( "rvale1os", CPENS (0, C8
, C5
, 5), F_HASXT
| F_REG_128
)
5286 TLBI_XS_OP ( "rvaale1os", CPENS (0, C8
, C5
, 7), F_HASXT
| F_REG_128
)
5287 TLBI_XS_OP ( "ripas2e1is", CPENS (4, C8
, C0
, 2), F_HASXT
| F_REG_128
)
5288 TLBI_XS_OP ( "ripas2le1is",CPENS (4, C8
, C0
, 6), F_HASXT
| F_REG_128
)
5289 TLBI_XS_OP ( "ripas2e1", CPENS (4, C8
, C4
, 2), F_HASXT
| F_REG_128
)
5290 TLBI_XS_OP ( "ripas2le1", CPENS (4, C8
, C4
, 6), F_HASXT
| F_REG_128
)
5291 TLBI_XS_OP ( "ripas2e1os", CPENS (4, C8
, C4
, 3), F_HASXT
| F_REG_128
)
5292 TLBI_XS_OP ( "ripas2le1os",CPENS (4, C8
, C4
, 7), F_HASXT
| F_REG_128
)
5293 TLBI_XS_OP ( "rvae2", CPENS (4, C8
, C6
, 1), F_HASXT
| F_REG_128
)
5294 TLBI_XS_OP ( "rvale2", CPENS (4, C8
, C6
, 5), F_HASXT
| F_REG_128
)
5295 TLBI_XS_OP ( "rvae2is", CPENS (4, C8
, C2
, 1), F_HASXT
| F_REG_128
)
5296 TLBI_XS_OP ( "rvale2is", CPENS (4, C8
, C2
, 5), F_HASXT
| F_REG_128
)
5297 TLBI_XS_OP ( "rvae2os", CPENS (4, C8
, C5
, 1), F_HASXT
| F_REG_128
)
5298 TLBI_XS_OP ( "rvale2os", CPENS (4, C8
, C5
, 5), F_HASXT
| F_REG_128
)
5299 TLBI_XS_OP ( "rvae3", CPENS (6, C8
, C6
, 1), F_HASXT
| F_REG_128
)
5300 TLBI_XS_OP ( "rvale3", CPENS (6, C8
, C6
, 5), F_HASXT
| F_REG_128
)
5301 TLBI_XS_OP ( "rvae3is", CPENS (6, C8
, C2
, 1), F_HASXT
| F_REG_128
)
5302 TLBI_XS_OP ( "rvale3is", CPENS (6, C8
, C2
, 5), F_HASXT
| F_REG_128
)
5303 TLBI_XS_OP ( "rvae3os", CPENS (6, C8
, C5
, 1), F_HASXT
| F_REG_128
)
5304 TLBI_XS_OP ( "rvale3os", CPENS (6, C8
, C5
, 5), F_HASXT
| F_REG_128
)
5308 { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES
}
5311 const aarch64_sys_ins_reg aarch64_sys_regs_sr
[] =
5313 /* RCTX is somewhat unique in a way that it has different values
5314 (op2) based on the instruction in which it is used (cfp/dvp/cpp).
5315 Thus op2 is masked out and instead encoded directly in the
5316 aarch64_opcode_table entries for the respective instructions. */
5317 { "rctx", CPENS(3,C7
,C3
,0), F_HASXT
| F_ARCHEXT
| F_REG_WRITE
, AARCH64_FEATURE (PREDRES
) }, /* WO */
5318 { 0, CPENS(0,0,0,0), 0, AARCH64_NO_FEATURES
}
5322 aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg
*sys_ins_reg
)
5324 return (sys_ins_reg
->flags
& F_HASXT
) != 0;
5328 aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features
,
5329 const char *reg_name
,
5331 const aarch64_feature_set
*reg_features
)
5333 /* Armv8-R has no EL3. */
5334 if (AARCH64_CPU_HAS_FEATURE (features
, V8R
))
5336 const char *suffix
= strrchr (reg_name
, '_');
5337 if (suffix
&& !strcmp (suffix
, "_el3"))
5341 if (!(reg_flags
& F_ARCHEXT
))
5344 return AARCH64_CPU_HAS_ALL_FEATURES (features
, *reg_features
);
/* Extract bit number BT of instruction word INSN.  */
#define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
/* Extract the inclusive bit-field [HI:LO] of instruction word INSN.  */
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
5367 static enum err_type
5368 verify_ldpsw (const struct aarch64_inst
*inst ATTRIBUTE_UNUSED
,
5369 const aarch64_insn insn
, bfd_vma pc ATTRIBUTE_UNUSED
,
5370 bool encoding ATTRIBUTE_UNUSED
,
5371 aarch64_operand_error
*mismatch_detail ATTRIBUTE_UNUSED
,
5372 aarch64_instr_sequence
*insn_sequence ATTRIBUTE_UNUSED
)
5374 int t
= BITS (insn
, 4, 0);
5375 int n
= BITS (insn
, 9, 5);
5376 int t2
= BITS (insn
, 14, 10);
5380 /* Write back enabled. */
5381 if ((t
== n
|| t2
== n
) && n
!= 31)
5395 /* Verifier for vector by element 3 operands functions where the
5396 conditions `if sz:L == 11 then UNDEFINED` holds. */
5398 static enum err_type
5399 verify_elem_sd (const struct aarch64_inst
*inst
, const aarch64_insn insn
,
5400 bfd_vma pc ATTRIBUTE_UNUSED
, bool encoding
,
5401 aarch64_operand_error
*mismatch_detail ATTRIBUTE_UNUSED
,
5402 aarch64_instr_sequence
*insn_sequence ATTRIBUTE_UNUSED
)
5404 const aarch64_insn undef_pattern
= 0x3;
5407 assert (inst
->opcode
);
5408 assert (inst
->opcode
->operands
[2] == AARCH64_OPND_Em
);
5409 value
= encoding
? inst
->value
: insn
;
5412 if (undef_pattern
== extract_fields (value
, 0, 2, FLD_sz
, FLD_L
))
5418 /* Check an instruction that takes three register operands and that
5419 requires the register numbers to be distinct from one another. */
5421 static enum err_type
5422 verify_three_different_regs (const struct aarch64_inst
*inst
,
5423 const aarch64_insn insn ATTRIBUTE_UNUSED
,
5424 bfd_vma pc ATTRIBUTE_UNUSED
,
5425 bool encoding ATTRIBUTE_UNUSED
,
5426 aarch64_operand_error
*mismatch_detail
5428 aarch64_instr_sequence
*insn_sequence
5433 rd
= inst
->operands
[0].reg
.regno
;
5434 rs
= inst
->operands
[1].reg
.regno
;
5435 rn
= inst
->operands
[2].reg
.regno
;
5436 if (rd
== rs
|| rd
== rn
|| rs
== rn
)
5438 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5439 mismatch_detail
->error
5440 = _("the three register operands must be distinct from one another");
5441 mismatch_detail
->index
= -1;
5448 /* Add INST to the end of INSN_SEQUENCE. */
5451 add_insn_to_sequence (const struct aarch64_inst
*inst
,
5452 aarch64_instr_sequence
*insn_sequence
)
5454 insn_sequence
->instr
[insn_sequence
->num_added_insns
++] = *inst
;
5457 /* Initialize an instruction sequence insn_sequence with the instruction INST.
5458 If INST is NULL the given insn_sequence is cleared and the sequence is left
5462 init_insn_sequence (const struct aarch64_inst
*inst
,
5463 aarch64_instr_sequence
*insn_sequence
)
5465 int num_req_entries
= 0;
5467 if (insn_sequence
->instr
)
5469 XDELETE (insn_sequence
->instr
);
5470 insn_sequence
->instr
= NULL
;
5473 /* Handle all the cases here. May need to think of something smarter than
5474 a giant if/else chain if this grows. At that time, a lookup table may be
5476 if (inst
&& inst
->opcode
->constraints
& C_SCAN_MOVPRFX
)
5477 num_req_entries
= 1;
5478 if (inst
&& (inst
->opcode
->constraints
& C_SCAN_MOPS_PME
) == C_SCAN_MOPS_P
)
5479 num_req_entries
= 2;
5481 insn_sequence
->num_added_insns
= 0;
5482 insn_sequence
->num_allocated_insns
= num_req_entries
;
5484 if (num_req_entries
!= 0)
5486 insn_sequence
->instr
= XCNEWVEC (aarch64_inst
, num_req_entries
);
5487 add_insn_to_sequence (inst
, insn_sequence
);
5491 /* Subroutine of verify_constraints. Check whether the instruction
5492 is part of a MOPS P/M/E sequence and, if so, whether sequencing
5493 expectations are met. Return true if the check passes, otherwise
5494 describe the problem in MISMATCH_DETAIL.
5496 IS_NEW_SECTION is true if INST is assumed to start a new section.
5497 The other arguments are as for verify_constraints. */
5500 verify_mops_pme_sequence (const struct aarch64_inst
*inst
,
5501 bool is_new_section
,
5502 aarch64_operand_error
*mismatch_detail
,
5503 aarch64_instr_sequence
*insn_sequence
)
5505 const struct aarch64_opcode
*opcode
;
5506 const struct aarch64_inst
*prev_insn
;
5509 opcode
= inst
->opcode
;
5510 if (insn_sequence
->instr
)
5511 prev_insn
= insn_sequence
->instr
+ (insn_sequence
->num_added_insns
- 1);
5516 && (prev_insn
->opcode
->constraints
& C_SCAN_MOPS_PME
)
5517 && prev_insn
->opcode
!= opcode
- 1)
5519 mismatch_detail
->kind
= AARCH64_OPDE_EXPECTED_A_AFTER_B
;
5520 mismatch_detail
->error
= NULL
;
5521 mismatch_detail
->index
= -1;
5522 mismatch_detail
->data
[0].s
= prev_insn
->opcode
[1].name
;
5523 mismatch_detail
->data
[1].s
= prev_insn
->opcode
->name
;
5524 mismatch_detail
->non_fatal
= true;
5528 if (opcode
->constraints
& C_SCAN_MOPS_PME
)
5530 if (is_new_section
|| !prev_insn
|| prev_insn
->opcode
!= opcode
- 1)
5532 mismatch_detail
->kind
= AARCH64_OPDE_A_SHOULD_FOLLOW_B
;
5533 mismatch_detail
->error
= NULL
;
5534 mismatch_detail
->index
= -1;
5535 mismatch_detail
->data
[0].s
= opcode
->name
;
5536 mismatch_detail
->data
[1].s
= opcode
[-1].name
;
5537 mismatch_detail
->non_fatal
= true;
5541 for (i
= 0; i
< 3; ++i
)
5542 /* There's no specific requirement for the data register to be
5543 the same between consecutive SET* instructions. */
5544 if ((opcode
->operands
[i
] == AARCH64_OPND_MOPS_ADDR_Rd
5545 || opcode
->operands
[i
] == AARCH64_OPND_MOPS_ADDR_Rs
5546 || opcode
->operands
[i
] == AARCH64_OPND_MOPS_WB_Rn
)
5547 && prev_insn
->operands
[i
].reg
.regno
!= inst
->operands
[i
].reg
.regno
)
5549 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5550 if (opcode
->operands
[i
] == AARCH64_OPND_MOPS_ADDR_Rd
)
5551 mismatch_detail
->error
= _("destination register differs from "
5552 "preceding instruction");
5553 else if (opcode
->operands
[i
] == AARCH64_OPND_MOPS_ADDR_Rs
)
5554 mismatch_detail
->error
= _("source register differs from "
5555 "preceding instruction");
5557 mismatch_detail
->error
= _("size register differs from "
5558 "preceding instruction");
5559 mismatch_detail
->index
= i
;
5560 mismatch_detail
->non_fatal
= true;
5568 /* This function verifies that the instruction INST adheres to its specified
5569 constraints. If it does then ERR_OK is returned, if not then ERR_VFI is
5570 returned and MISMATCH_DETAIL contains the reason why verification failed.
5572 The function is called both during assembly and disassembly. If assembling
5573 then ENCODING will be TRUE, else FALSE. If dissassembling PC will be set
5574 and will contain the PC of the current instruction w.r.t to the section.
5576 If ENCODING and PC=0 then you are at a start of a section. The constraints
5577 are verified against the given state insn_sequence which is updated as it
5578 transitions through the verification. */
5581 verify_constraints (const struct aarch64_inst
*inst
,
5582 const aarch64_insn insn ATTRIBUTE_UNUSED
,
5585 aarch64_operand_error
*mismatch_detail
,
5586 aarch64_instr_sequence
*insn_sequence
)
5589 assert (inst
->opcode
);
5591 const struct aarch64_opcode
*opcode
= inst
->opcode
;
5592 if (!opcode
->constraints
&& !insn_sequence
->instr
)
5595 assert (insn_sequence
);
5597 enum err_type res
= ERR_OK
;
5599 /* This instruction puts a constraint on the insn_sequence. */
5600 if (opcode
->flags
& F_SCAN
)
5602 if (insn_sequence
->instr
)
5604 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5605 mismatch_detail
->error
= _("instruction opens new dependency "
5606 "sequence without ending previous one");
5607 mismatch_detail
->index
= -1;
5608 mismatch_detail
->non_fatal
= true;
5612 init_insn_sequence (inst
, insn_sequence
);
5616 bool is_new_section
= (!encoding
&& pc
== 0);
5617 if (!verify_mops_pme_sequence (inst
, is_new_section
, mismatch_detail
,
5621 if ((opcode
->constraints
& C_SCAN_MOPS_PME
) != C_SCAN_MOPS_M
)
5622 init_insn_sequence (NULL
, insn_sequence
);
5625 /* Verify constraints on an existing sequence. */
5626 if (insn_sequence
->instr
)
5628 const struct aarch64_opcode
* inst_opcode
= insn_sequence
->instr
->opcode
;
5629 /* If we're decoding and we hit PC=0 with an open sequence then we haven't
5630 closed a previous one that we should have. */
5631 if (is_new_section
&& res
== ERR_OK
)
5633 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5634 mismatch_detail
->error
= _("previous `movprfx' sequence not closed");
5635 mismatch_detail
->index
= -1;
5636 mismatch_detail
->non_fatal
= true;
5638 /* Reset the sequence. */
5639 init_insn_sequence (NULL
, insn_sequence
);
5643 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
5644 if (inst_opcode
->constraints
& C_SCAN_MOVPRFX
)
5646 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5647 instruction for better error messages. */
5648 if (!opcode
->avariant
5649 || (!AARCH64_CPU_HAS_FEATURE (*opcode
->avariant
, SVE
)
5650 && !AARCH64_CPU_HAS_FEATURE (*opcode
->avariant
, SVE2
)
5651 && !AARCH64_CPU_HAS_FEATURE (*opcode
->avariant
, SVE2p1
)))
5653 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5654 mismatch_detail
->error
= _("SVE instruction expected after "
5656 mismatch_detail
->index
= -1;
5657 mismatch_detail
->non_fatal
= true;
5662 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5663 instruction that is allowed to be used with a MOVPRFX. */
5664 if (!(opcode
->constraints
& C_SCAN_MOVPRFX
))
5666 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5667 mismatch_detail
->error
= _("SVE `movprfx' compatible instruction "
5669 mismatch_detail
->index
= -1;
5670 mismatch_detail
->non_fatal
= true;
5675 /* Next check for usage of the predicate register. */
5676 aarch64_opnd_info blk_dest
= insn_sequence
->instr
->operands
[0];
5677 aarch64_opnd_info blk_pred
, inst_pred
;
5678 memset (&blk_pred
, 0, sizeof (aarch64_opnd_info
));
5679 memset (&inst_pred
, 0, sizeof (aarch64_opnd_info
));
5680 bool predicated
= false;
5681 assert (blk_dest
.type
== AARCH64_OPND_SVE_Zd
);
5683 /* Determine if the movprfx instruction used is predicated or not. */
5684 if (insn_sequence
->instr
->operands
[1].type
== AARCH64_OPND_SVE_Pg3
)
5687 blk_pred
= insn_sequence
->instr
->operands
[1];
5690 unsigned char max_elem_size
= 0;
5691 unsigned char current_elem_size
;
5692 int num_op_used
= 0, last_op_usage
= 0;
5693 int i
, inst_pred_idx
= -1;
5694 int num_ops
= aarch64_num_of_operands (opcode
);
5695 for (i
= 0; i
< num_ops
; i
++)
5697 aarch64_opnd_info inst_op
= inst
->operands
[i
];
5698 switch (inst_op
.type
)
5700 case AARCH64_OPND_SVE_Zd
:
5701 case AARCH64_OPND_SVE_Zm_5
:
5702 case AARCH64_OPND_SVE_Zm_16
:
5703 case AARCH64_OPND_SVE_Zn
:
5704 case AARCH64_OPND_SVE_Zt
:
5705 case AARCH64_OPND_SVE_Vm
:
5706 case AARCH64_OPND_SVE_Vn
:
5707 case AARCH64_OPND_Va
:
5708 case AARCH64_OPND_Vn
:
5709 case AARCH64_OPND_Vm
:
5710 case AARCH64_OPND_Sn
:
5711 case AARCH64_OPND_Sm
:
5712 if (inst_op
.reg
.regno
== blk_dest
.reg
.regno
)
5718 = aarch64_get_qualifier_esize (inst_op
.qualifier
);
5719 if (current_elem_size
> max_elem_size
)
5720 max_elem_size
= current_elem_size
;
5722 case AARCH64_OPND_SVE_Pd
:
5723 case AARCH64_OPND_SVE_Pg3
:
5724 case AARCH64_OPND_SVE_Pg4_5
:
5725 case AARCH64_OPND_SVE_Pg4_10
:
5726 case AARCH64_OPND_SVE_Pg4_16
:
5727 case AARCH64_OPND_SVE_Pm
:
5728 case AARCH64_OPND_SVE_Pn
:
5729 case AARCH64_OPND_SVE_Pt
:
5730 case AARCH64_OPND_SME_Pm
:
5731 inst_pred
= inst_op
;
5739 assert (max_elem_size
!= 0);
5740 aarch64_opnd_info inst_dest
= inst
->operands
[0];
5741 /* Determine the size that should be used to compare against the
5744 = opcode
->constraints
& C_MAX_ELEM
5746 : aarch64_get_qualifier_esize (inst_dest
.qualifier
);
5748 /* If movprfx is predicated do some extra checks. */
5751 /* The instruction must be predicated. */
5752 if (inst_pred_idx
< 0)
5754 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5755 mismatch_detail
->error
= _("predicated instruction expected "
5757 mismatch_detail
->index
= -1;
5758 mismatch_detail
->non_fatal
= true;
5763 /* The instruction must have a merging predicate. */
5764 if (inst_pred
.qualifier
!= AARCH64_OPND_QLF_P_M
)
5766 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5767 mismatch_detail
->error
= _("merging predicate expected due "
5768 "to preceding `movprfx'");
5769 mismatch_detail
->index
= inst_pred_idx
;
5770 mismatch_detail
->non_fatal
= true;
5775 /* The same register must be used in instruction. */
5776 if (blk_pred
.reg
.regno
!= inst_pred
.reg
.regno
)
5778 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5779 mismatch_detail
->error
= _("predicate register differs "
5780 "from that in preceding "
5782 mismatch_detail
->index
= inst_pred_idx
;
5783 mismatch_detail
->non_fatal
= true;
5789 /* Destructive operations by definition must allow one usage of the
5792 = aarch64_is_destructive_by_operands (opcode
) ? 2 : 1;
5794 /* Operand is not used at all. */
5795 if (num_op_used
== 0)
5797 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5798 mismatch_detail
->error
= _("output register of preceding "
5799 "`movprfx' not used in current "
5801 mismatch_detail
->index
= 0;
5802 mismatch_detail
->non_fatal
= true;
5807 /* We now know it's used, now determine exactly where it's used. */
5808 if (blk_dest
.reg
.regno
!= inst_dest
.reg
.regno
)
5810 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5811 mismatch_detail
->error
= _("output register of preceding "
5812 "`movprfx' expected as output");
5813 mismatch_detail
->index
= 0;
5814 mismatch_detail
->non_fatal
= true;
5819 /* Operand used more than allowed for the specific opcode type. */
5820 if (num_op_used
> allowed_usage
)
5822 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5823 mismatch_detail
->error
= _("output register of preceding "
5824 "`movprfx' used as input");
5825 mismatch_detail
->index
= last_op_usage
;
5826 mismatch_detail
->non_fatal
= true;
5831 /* Now the only thing left is the qualifiers checks. The register
5832 must have the same maximum element size. */
5833 if (inst_dest
.qualifier
5834 && blk_dest
.qualifier
5835 && current_elem_size
5836 != aarch64_get_qualifier_esize (blk_dest
.qualifier
))
5838 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5839 mismatch_detail
->error
= _("register size not compatible with "
5840 "previous `movprfx'");
5841 mismatch_detail
->index
= 0;
5842 mismatch_detail
->non_fatal
= true;
5849 if (insn_sequence
->num_added_insns
== insn_sequence
->num_allocated_insns
)
5850 /* We've checked the last instruction in the sequence and so
5851 don't need the sequence any more. */
5852 init_insn_sequence (NULL
, insn_sequence
);
5854 add_insn_to_sequence (inst
, insn_sequence
);
/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  /* Mask of the bits above the ESIZE-byte element; the double shift
     yields 0 for esize == 8 without shifting by >= 64 (which would be
     undefined behavior).  */
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return false;
  /* Narrow SVALUE to the smallest element size that can replicate to
     produce UVALUE.  */
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return false;
	}
    }
  /* DUP's immediate may be shifted left by 8, so a trailing zero byte
     can be dropped before the signed 8-bit range check.  */
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  return svalue < -128 || svalue >= 128;
}
5888 /* Return true if a CPU with the AARCH64_FEATURE_* bits in CPU_VARIANT
5889 supports the instruction described by INST. */
5892 aarch64_cpu_supports_inst_p (aarch64_feature_set cpu_variant
,
5895 if (!inst
->opcode
->avariant
5896 || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant
, *inst
->opcode
->avariant
))
5899 if (inst
->opcode
->iclass
== sme_fp_sd
5900 && inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
5901 && !AARCH64_CPU_HAS_FEATURE (cpu_variant
, SME_F64F64
))
5904 if (inst
->opcode
->iclass
== sme_int_sd
5905 && inst
->operands
[0].qualifier
== AARCH64_OPND_QLF_S_D
5906 && !AARCH64_CPU_HAS_FEATURE (cpu_variant
, SME_I16I64
))
5912 /* Include the opcode description table as well as the operand description
5914 #define VERIFIER(x) verify_##x
5915 #include "aarch64-tbl.h"