1 /* aarch64-opc.c -- AArch64 opcode support.
2 Copyright (C) 2009-2020 Free Software Foundation, Inc.
3 Contributed by ARM Ltd.
5 This file is part of the GNU opcodes library.
7 This library is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License as published by
9 the Free Software Foundation; either version 3, or (at your option)
12 It is distributed in the hope that it will be useful, but WITHOUT
13 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14 or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
15 License for more details.
17 You should have received a copy of the GNU General Public License
18 along with this program; see the file COPYING3. If not,
19 see <http://www.gnu.org/licenses/>. */
25 #include "bfd_stdint.h"
30 #include "libiberty.h"
32 #include "aarch64-opc.h"
35 int debug_dump
= FALSE
;
36 #endif /* DEBUG_AARCH64 */
38 /* The enumeration strings associated with each value of a 5-bit SVE
39 pattern operand. A null entry indicates a reserved meaning. */
40 const char *const aarch64_sve_pattern_array
[32] = {
79 /* The enumeration strings associated with each value of a 4-bit SVE
80 prefetch operand. A null entry indicates a reserved meaning. */
81 const char *const aarch64_sve_prfop_array
[16] = {
102 /* Helper functions to determine which operand to be used to encode/decode
103 the size:Q fields for AdvSIMD instructions. */
105 static inline bfd_boolean
106 vector_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
108 return ((qualifier
>= AARCH64_OPND_QLF_V_8B
109 && qualifier
<= AARCH64_OPND_QLF_V_1Q
) ? TRUE
113 static inline bfd_boolean
114 fp_qualifier_p (enum aarch64_opnd_qualifier qualifier
)
116 return ((qualifier
>= AARCH64_OPND_QLF_S_B
117 && qualifier
<= AARCH64_OPND_QLF_S_Q
) ? TRUE
127 DP_VECTOR_ACROSS_LANES
,
130 static const char significant_operand_index
[] =
132 0, /* DP_UNKNOWN, by default using operand 0. */
133 0, /* DP_VECTOR_3SAME */
134 1, /* DP_VECTOR_LONG */
135 2, /* DP_VECTOR_WIDE */
136 1, /* DP_VECTOR_ACROSS_LANES */
139 /* Given a sequence of qualifiers in QUALIFIERS, determine and return
141 N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142 corresponds to one of a sequence of operands. */
144 static enum data_pattern
145 get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers
)
147 if (vector_qualifier_p (qualifiers
[0]) == TRUE
)
149 /* e.g. v.4s, v.4s, v.4s
150 or v.4h, v.4h, v.h[3]. */
151 if (qualifiers
[0] == qualifiers
[1]
152 && vector_qualifier_p (qualifiers
[2]) == TRUE
153 && (aarch64_get_qualifier_esize (qualifiers
[0])
154 == aarch64_get_qualifier_esize (qualifiers
[1]))
155 && (aarch64_get_qualifier_esize (qualifiers
[0])
156 == aarch64_get_qualifier_esize (qualifiers
[2])))
157 return DP_VECTOR_3SAME
;
158 /* e.g. v.8h, v.8b, v.8b.
159 or v.4s, v.4h, v.h[2].
161 if (vector_qualifier_p (qualifiers
[1]) == TRUE
162 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
163 && (aarch64_get_qualifier_esize (qualifiers
[0])
164 == aarch64_get_qualifier_esize (qualifiers
[1]) << 1))
165 return DP_VECTOR_LONG
;
166 /* e.g. v.8h, v.8h, v.8b. */
167 if (qualifiers
[0] == qualifiers
[1]
168 && vector_qualifier_p (qualifiers
[2]) == TRUE
169 && aarch64_get_qualifier_esize (qualifiers
[0]) != 0
170 && (aarch64_get_qualifier_esize (qualifiers
[0])
171 == aarch64_get_qualifier_esize (qualifiers
[2]) << 1)
172 && (aarch64_get_qualifier_esize (qualifiers
[0])
173 == aarch64_get_qualifier_esize (qualifiers
[1])))
174 return DP_VECTOR_WIDE
;
176 else if (fp_qualifier_p (qualifiers
[0]) == TRUE
)
178 /* e.g. SADDLV <V><d>, <Vn>.<T>. */
179 if (vector_qualifier_p (qualifiers
[1]) == TRUE
180 && qualifiers
[2] == AARCH64_OPND_QLF_NIL
)
181 return DP_VECTOR_ACROSS_LANES
;
187 /* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188 the AdvSIMD instructions. */
189 /* N.B. it is possible to do some optimization that doesn't call
190 get_data_pattern each time when we need to select an operand. We can
191 either buffer the caculated the result or statically generate the data,
192 however, it is not obvious that the optimization will bring significant
196 aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode
*opcode
)
199 significant_operand_index
[get_data_pattern (opcode
->qualifiers_list
[0])];
202 const aarch64_field fields
[] =
205 { 0, 4 }, /* cond2: condition in truly conditional-executed inst. */
206 { 0, 4 }, /* nzcv: flag bit specifier, encoded in the "nzcv" field. */
207 { 5, 5 }, /* defgh: d:e:f:g:h bits in AdvSIMD modified immediate. */
208 { 16, 3 }, /* abc: a:b:c bits in AdvSIMD modified immediate. */
209 { 5, 19 }, /* imm19: e.g. in CBZ. */
210 { 5, 19 }, /* immhi: e.g. in ADRP. */
211 { 29, 2 }, /* immlo: e.g. in ADRP. */
212 { 22, 2 }, /* size: in most AdvSIMD and floating-point instructions. */
213 { 10, 2 }, /* vldst_size: size field in the AdvSIMD load/store inst. */
214 { 29, 1 }, /* op: in AdvSIMD modified immediate instructions. */
215 { 30, 1 }, /* Q: in most AdvSIMD instructions. */
216 { 0, 5 }, /* Rt: in load/store instructions. */
217 { 0, 5 }, /* Rd: in many integer instructions. */
218 { 5, 5 }, /* Rn: in many integer instructions. */
219 { 10, 5 }, /* Rt2: in load/store pair instructions. */
220 { 10, 5 }, /* Ra: in fp instructions. */
221 { 5, 3 }, /* op2: in the system instructions. */
222 { 8, 4 }, /* CRm: in the system instructions. */
223 { 12, 4 }, /* CRn: in the system instructions. */
224 { 16, 3 }, /* op1: in the system instructions. */
225 { 19, 2 }, /* op0: in the system instructions. */
226 { 10, 3 }, /* imm3: in add/sub extended reg instructions. */
227 { 12, 4 }, /* cond: condition flags as a source operand. */
228 { 12, 4 }, /* opcode: in advsimd load/store instructions. */
229 { 12, 4 }, /* cmode: in advsimd modified immediate instructions. */
230 { 13, 3 }, /* asisdlso_opcode: opcode in advsimd ld/st single element. */
231 { 13, 2 }, /* len: in advsimd tbl/tbx instructions. */
232 { 16, 5 }, /* Rm: in ld/st reg offset and some integer inst. */
233 { 16, 5 }, /* Rs: in load/store exclusive instructions. */
234 { 13, 3 }, /* option: in ld/st reg offset + add/sub extended reg inst. */
235 { 12, 1 }, /* S: in load/store reg offset instructions. */
236 { 21, 2 }, /* hw: in move wide constant instructions. */
237 { 22, 2 }, /* opc: in load/store reg offset instructions. */
238 { 23, 1 }, /* opc1: in load/store reg offset instructions. */
239 { 22, 2 }, /* shift: in add/sub reg/imm shifted instructions. */
240 { 22, 2 }, /* type: floating point type field in fp data inst. */
241 { 30, 2 }, /* ldst_size: size field in ld/st reg offset inst. */
242 { 10, 6 }, /* imm6: in add/sub reg shifted instructions. */
243 { 15, 6 }, /* imm6_2: in rmif instructions. */
244 { 11, 4 }, /* imm4: in advsimd ext and advsimd ins instructions. */
245 { 0, 4 }, /* imm4_2: in rmif instructions. */
246 { 10, 4 }, /* imm4_3: in adddg/subg instructions. */
247 { 16, 5 }, /* imm5: in conditional compare (immediate) instructions. */
248 { 15, 7 }, /* imm7: in load/store pair pre/post index instructions. */
249 { 13, 8 }, /* imm8: in floating-point scalar move immediate inst. */
250 { 12, 9 }, /* imm9: in load/store pre/post index instructions. */
251 { 10, 12 }, /* imm12: in ld/st unsigned imm or add/sub shifted inst. */
252 { 5, 14 }, /* imm14: in test bit and branch instructions. */
253 { 5, 16 }, /* imm16: in exception instructions. */
254 { 0, 16 }, /* imm16_2: in udf instruction. */
255 { 0, 26 }, /* imm26: in unconditional branch instructions. */
256 { 10, 6 }, /* imms: in bitfield and logical immediate instructions. */
257 { 16, 6 }, /* immr: in bitfield and logical immediate instructions. */
258 { 16, 3 }, /* immb: in advsimd shift by immediate instructions. */
259 { 19, 4 }, /* immh: in advsimd shift by immediate instructions. */
260 { 22, 1 }, /* S: in LDRAA and LDRAB instructions. */
261 { 22, 1 }, /* N: in logical (immediate) instructions. */
262 { 11, 1 }, /* index: in ld/st inst deciding the pre/post-index. */
263 { 24, 1 }, /* index2: in ld/st pair inst deciding the pre/post-index. */
264 { 31, 1 }, /* sf: in integer data processing instructions. */
265 { 30, 1 }, /* lse_size: in LSE extension atomic instructions. */
266 { 11, 1 }, /* H: in advsimd scalar x indexed element instructions. */
267 { 21, 1 }, /* L: in advsimd scalar x indexed element instructions. */
268 { 20, 1 }, /* M: in advsimd scalar x indexed element instructions. */
269 { 31, 1 }, /* b5: in the test bit and branch instructions. */
270 { 19, 5 }, /* b40: in the test bit and branch instructions. */
271 { 10, 6 }, /* scale: in the fixed-point scalar to fp converting inst. */
272 { 4, 1 }, /* SVE_M_4: Merge/zero select, bit 4. */
273 { 14, 1 }, /* SVE_M_14: Merge/zero select, bit 14. */
274 { 16, 1 }, /* SVE_M_16: Merge/zero select, bit 16. */
275 { 17, 1 }, /* SVE_N: SVE equivalent of N. */
276 { 0, 4 }, /* SVE_Pd: p0-p15, bits [3,0]. */
277 { 10, 3 }, /* SVE_Pg3: p0-p7, bits [12,10]. */
278 { 5, 4 }, /* SVE_Pg4_5: p0-p15, bits [8,5]. */
279 { 10, 4 }, /* SVE_Pg4_10: p0-p15, bits [13,10]. */
280 { 16, 4 }, /* SVE_Pg4_16: p0-p15, bits [19,16]. */
281 { 16, 4 }, /* SVE_Pm: p0-p15, bits [19,16]. */
282 { 5, 4 }, /* SVE_Pn: p0-p15, bits [8,5]. */
283 { 0, 4 }, /* SVE_Pt: p0-p15, bits [3,0]. */
284 { 5, 5 }, /* SVE_Rm: SVE alternative position for Rm. */
285 { 16, 5 }, /* SVE_Rn: SVE alternative position for Rn. */
286 { 0, 5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0]. */
287 { 5, 5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5]. */
288 { 5, 5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5]. */
289 { 5, 5 }, /* SVE_Za_5: SVE vector register, bits [9,5]. */
290 { 16, 5 }, /* SVE_Za_16: SVE vector register, bits [20,16]. */
291 { 0, 5 }, /* SVE_Zd: SVE vector register. bits [4,0]. */
292 { 5, 5 }, /* SVE_Zm_5: SVE vector register, bits [9,5]. */
293 { 16, 5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
294 { 5, 5 }, /* SVE_Zn: SVE vector register, bits [9,5]. */
295 { 0, 5 }, /* SVE_Zt: SVE vector register, bits [4,0]. */
296 { 5, 1 }, /* SVE_i1: single-bit immediate. */
297 { 22, 1 }, /* SVE_i3h: high bit of 3-bit immediate. */
298 { 11, 1 }, /* SVE_i3l: low bit of 3-bit immediate. */
299 { 19, 2 }, /* SVE_i3h2: two high bits of 3bit immediate, bits [20,19]. */
300 { 20, 1 }, /* SVE_i2h: high bit of 2bit immediate, bits. */
301 { 16, 3 }, /* SVE_imm3: 3-bit immediate field. */
302 { 16, 4 }, /* SVE_imm4: 4-bit immediate field. */
303 { 5, 5 }, /* SVE_imm5: 5-bit immediate field. */
304 { 16, 5 }, /* SVE_imm5b: secondary 5-bit immediate field. */
305 { 16, 6 }, /* SVE_imm6: 6-bit immediate field. */
306 { 14, 7 }, /* SVE_imm7: 7-bit immediate field. */
307 { 5, 8 }, /* SVE_imm8: 8-bit immediate field. */
308 { 5, 9 }, /* SVE_imm9: 9-bit immediate field. */
309 { 11, 6 }, /* SVE_immr: SVE equivalent of immr. */
310 { 5, 6 }, /* SVE_imms: SVE equivalent of imms. */
311 { 10, 2 }, /* SVE_msz: 2-bit shift amount for ADR. */
312 { 5, 5 }, /* SVE_pattern: vector pattern enumeration. */
313 { 0, 4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD]. */
314 { 16, 1 }, /* SVE_rot1: 1-bit rotation amount. */
315 { 10, 2 }, /* SVE_rot2: 2-bit rotation amount. */
316 { 10, 1 }, /* SVE_rot3: 1-bit rotation amount at bit 10. */
317 { 22, 1 }, /* SVE_sz: 1-bit element size select. */
318 { 17, 2 }, /* SVE_size: 2-bit element size, bits [18,17]. */
319 { 30, 1 }, /* SVE_sz2: 1-bit element size select. */
320 { 16, 4 }, /* SVE_tsz: triangular size select. */
321 { 22, 2 }, /* SVE_tszh: triangular size select high, bits [23,22]. */
322 { 8, 2 }, /* SVE_tszl_8: triangular size select low, bits [9,8]. */
323 { 19, 2 }, /* SVE_tszl_19: triangular size select low, bits [20,19]. */
324 { 14, 1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14). */
325 { 22, 1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22). */
326 { 11, 2 }, /* rotate1: FCMLA immediate rotate. */
327 { 13, 2 }, /* rotate2: Indexed element FCMLA immediate rotate. */
328 { 12, 1 }, /* rotate3: FCADD immediate rotate. */
329 { 12, 2 }, /* SM3: Indexed element SM3 2 bits index immediate. */
330 { 22, 1 }, /* sz: 1-bit element size select. */
331 { 10, 2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>. */
334 enum aarch64_operand_class
335 aarch64_get_operand_class (enum aarch64_opnd type
)
337 return aarch64_operands
[type
].op_class
;
341 aarch64_get_operand_name (enum aarch64_opnd type
)
343 return aarch64_operands
[type
].name
;
346 /* Get operand description string.
347 This is usually for the diagnosis purpose. */
349 aarch64_get_operand_desc (enum aarch64_opnd type
)
351 return aarch64_operands
[type
].desc
;
354 /* Table of all conditional affixes. */
355 const aarch64_cond aarch64_conds
[16] =
357 {{"eq", "none"}, 0x0},
358 {{"ne", "any"}, 0x1},
359 {{"cs", "hs", "nlast"}, 0x2},
360 {{"cc", "lo", "ul", "last"}, 0x3},
361 {{"mi", "first"}, 0x4},
362 {{"pl", "nfrst"}, 0x5},
365 {{"hi", "pmore"}, 0x8},
366 {{"ls", "plast"}, 0x9},
367 {{"ge", "tcont"}, 0xa},
368 {{"lt", "tstop"}, 0xb},
376 get_cond_from_value (aarch64_insn value
)
379 return &aarch64_conds
[(unsigned int) value
];
383 get_inverted_cond (const aarch64_cond
*cond
)
385 return &aarch64_conds
[cond
->value
^ 0x1];
388 /* Table describing the operand extension/shifting operators; indexed by
389 enum aarch64_modifier_kind.
391 The value column provides the most common values for encoding modifiers,
392 which enables table-driven encoding/decoding for the modifiers. */
393 const struct aarch64_name_value_pair aarch64_operand_modifiers
[] =
414 enum aarch64_modifier_kind
415 aarch64_get_operand_modifier (const struct aarch64_name_value_pair
*desc
)
417 return desc
- aarch64_operand_modifiers
;
421 aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind
)
423 return aarch64_operand_modifiers
[kind
].value
;
426 enum aarch64_modifier_kind
427 aarch64_get_operand_modifier_from_value (aarch64_insn value
,
428 bfd_boolean extend_p
)
430 if (extend_p
== TRUE
)
431 return AARCH64_MOD_UXTB
+ value
;
433 return AARCH64_MOD_LSL
- value
;
437 aarch64_extend_operator_p (enum aarch64_modifier_kind kind
)
439 return (kind
> AARCH64_MOD_LSL
&& kind
<= AARCH64_MOD_SXTX
)
443 static inline bfd_boolean
444 aarch64_shift_operator_p (enum aarch64_modifier_kind kind
)
446 return (kind
>= AARCH64_MOD_ROR
&& kind
<= AARCH64_MOD_LSL
)
450 const struct aarch64_name_value_pair aarch64_barrier_options
[16] =
470 const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options
[4] =
471 { /* CRm<3:2> #imm */
472 { "oshnxs", 16 }, /* 00 16 */
473 { "nshnxs", 20 }, /* 01 20 */
474 { "ishnxs", 24 }, /* 10 24 */
475 { "synxs", 28 }, /* 11 28 */
478 /* Table describing the operands supported by the aliases of the HINT
481 The name column is the operand that is accepted for the alias. The value
482 column is the hint number of the alias. The list of operands is terminated
483 by NULL in the name column. */
485 const struct aarch64_name_value_pair aarch64_hint_options
[] =
487 /* BTI. This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET. */
488 { " ", HINT_ENCODE (HINT_OPD_F_NOPRINT
, 0x20) },
489 { "csync", HINT_OPD_CSYNC
}, /* PSB CSYNC. */
490 { "c", HINT_OPD_C
}, /* BTI C. */
491 { "j", HINT_OPD_J
}, /* BTI J. */
492 { "jc", HINT_OPD_JC
}, /* BTI JC. */
493 { NULL
, HINT_OPD_NULL
},
496 /* op -> op: load = 0 instruction = 1 store = 2
498 t -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1 */
499 #define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
500 const struct aarch64_name_value_pair aarch64_prfops
[32] =
502 { "pldl1keep", B(0, 1, 0) },
503 { "pldl1strm", B(0, 1, 1) },
504 { "pldl2keep", B(0, 2, 0) },
505 { "pldl2strm", B(0, 2, 1) },
506 { "pldl3keep", B(0, 3, 0) },
507 { "pldl3strm", B(0, 3, 1) },
510 { "plil1keep", B(1, 1, 0) },
511 { "plil1strm", B(1, 1, 1) },
512 { "plil2keep", B(1, 2, 0) },
513 { "plil2strm", B(1, 2, 1) },
514 { "plil3keep", B(1, 3, 0) },
515 { "plil3strm", B(1, 3, 1) },
518 { "pstl1keep", B(2, 1, 0) },
519 { "pstl1strm", B(2, 1, 1) },
520 { "pstl2keep", B(2, 2, 0) },
521 { "pstl2strm", B(2, 2, 1) },
522 { "pstl3keep", B(2, 3, 0) },
523 { "pstl3strm", B(2, 3, 1) },
537 /* Utilities on value constraint. */
540 value_in_range_p (int64_t value
, int low
, int high
)
542 return (value
>= low
&& value
<= high
) ? 1 : 0;
545 /* Return true if VALUE is a multiple of ALIGN. */
547 value_aligned_p (int64_t value
, int align
)
549 return (value
% align
) == 0;
552 /* A signed value fits in a field. */
554 value_fit_signed_field_p (int64_t value
, unsigned width
)
557 if (width
< sizeof (value
) * 8)
559 int64_t lim
= (uint64_t) 1 << (width
- 1);
560 if (value
>= -lim
&& value
< lim
)
566 /* An unsigned value fits in a field. */
568 value_fit_unsigned_field_p (int64_t value
, unsigned width
)
571 if (width
< sizeof (value
) * 8)
573 int64_t lim
= (uint64_t) 1 << width
;
574 if (value
>= 0 && value
< lim
)
580 /* Return 1 if OPERAND is SP or WSP. */
582 aarch64_stack_pointer_p (const aarch64_opnd_info
*operand
)
584 return ((aarch64_get_operand_class (operand
->type
)
585 == AARCH64_OPND_CLASS_INT_REG
)
586 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
587 && operand
->reg
.regno
== 31);
590 /* Return 1 if OPERAND is XZR or WZP. */
592 aarch64_zero_register_p (const aarch64_opnd_info
*operand
)
594 return ((aarch64_get_operand_class (operand
->type
)
595 == AARCH64_OPND_CLASS_INT_REG
)
596 && !operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
)
597 && operand
->reg
.regno
== 31);
600 /* Return true if the operand *OPERAND that has the operand code
601 OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
602 qualified by the qualifier TARGET. */
605 operand_also_qualified_p (const struct aarch64_opnd_info
*operand
,
606 aarch64_opnd_qualifier_t target
)
608 switch (operand
->qualifier
)
610 case AARCH64_OPND_QLF_W
:
611 if (target
== AARCH64_OPND_QLF_WSP
&& aarch64_stack_pointer_p (operand
))
614 case AARCH64_OPND_QLF_X
:
615 if (target
== AARCH64_OPND_QLF_SP
&& aarch64_stack_pointer_p (operand
))
618 case AARCH64_OPND_QLF_WSP
:
619 if (target
== AARCH64_OPND_QLF_W
620 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
623 case AARCH64_OPND_QLF_SP
:
624 if (target
== AARCH64_OPND_QLF_X
625 && operand_maybe_stack_pointer (aarch64_operands
+ operand
->type
))
635 /* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
636 for operand KNOWN_IDX, return the expected qualifier for operand IDX.
638 Return NIL if more than one expected qualifiers are found. */
640 aarch64_opnd_qualifier_t
641 aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t
*qseq_list
,
643 const aarch64_opnd_qualifier_t known_qlf
,
650 When the known qualifier is NIL, we have to assume that there is only
651 one qualifier sequence in the *QSEQ_LIST and return the corresponding
652 qualifier directly. One scenario is that for instruction
653 PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
654 which has only one possible valid qualifier sequence
656 the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
657 determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
659 Because the qualifier NIL has dual roles in the qualifier sequence:
660 it can mean no qualifier for the operand, or the qualifer sequence is
661 not in use (when all qualifiers in the sequence are NILs), we have to
662 handle this special case here. */
663 if (known_qlf
== AARCH64_OPND_NIL
)
665 assert (qseq_list
[0][known_idx
] == AARCH64_OPND_NIL
);
666 return qseq_list
[0][idx
];
669 for (i
= 0, saved_i
= -1; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
)
671 if (qseq_list
[i
][known_idx
] == known_qlf
)
674 /* More than one sequences are found to have KNOWN_QLF at
676 return AARCH64_OPND_NIL
;
681 return qseq_list
[saved_i
][idx
];
684 enum operand_qualifier_kind
692 /* Operand qualifier description. */
693 struct operand_qualifier_data
695 /* The usage of the three data fields depends on the qualifier kind. */
702 enum operand_qualifier_kind kind
;
705 /* Indexed by the operand qualifier enumerators. */
706 struct operand_qualifier_data aarch64_opnd_qualifiers
[] =
708 {0, 0, 0, "NIL", OQK_NIL
},
710 /* Operand variant qualifiers.
712 element size, number of elements and common value for encoding. */
714 {4, 1, 0x0, "w", OQK_OPD_VARIANT
},
715 {8, 1, 0x1, "x", OQK_OPD_VARIANT
},
716 {4, 1, 0x0, "wsp", OQK_OPD_VARIANT
},
717 {8, 1, 0x1, "sp", OQK_OPD_VARIANT
},
719 {1, 1, 0x0, "b", OQK_OPD_VARIANT
},
720 {2, 1, 0x1, "h", OQK_OPD_VARIANT
},
721 {4, 1, 0x2, "s", OQK_OPD_VARIANT
},
722 {8, 1, 0x3, "d", OQK_OPD_VARIANT
},
723 {16, 1, 0x4, "q", OQK_OPD_VARIANT
},
724 {4, 1, 0x0, "4b", OQK_OPD_VARIANT
},
725 {4, 1, 0x0, "2h", OQK_OPD_VARIANT
},
727 {1, 4, 0x0, "4b", OQK_OPD_VARIANT
},
728 {1, 8, 0x0, "8b", OQK_OPD_VARIANT
},
729 {1, 16, 0x1, "16b", OQK_OPD_VARIANT
},
730 {2, 2, 0x0, "2h", OQK_OPD_VARIANT
},
731 {2, 4, 0x2, "4h", OQK_OPD_VARIANT
},
732 {2, 8, 0x3, "8h", OQK_OPD_VARIANT
},
733 {4, 2, 0x4, "2s", OQK_OPD_VARIANT
},
734 {4, 4, 0x5, "4s", OQK_OPD_VARIANT
},
735 {8, 1, 0x6, "1d", OQK_OPD_VARIANT
},
736 {8, 2, 0x7, "2d", OQK_OPD_VARIANT
},
737 {16, 1, 0x8, "1q", OQK_OPD_VARIANT
},
739 {0, 0, 0, "z", OQK_OPD_VARIANT
},
740 {0, 0, 0, "m", OQK_OPD_VARIANT
},
742 /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc). */
743 {16, 0, 0, "tag", OQK_OPD_VARIANT
},
745 /* Qualifiers constraining the value range.
747 Lower bound, higher bound, unused. */
749 {0, 15, 0, "CR", OQK_VALUE_IN_RANGE
},
750 {0, 7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE
},
751 {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE
},
752 {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE
},
753 {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE
},
754 {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE
},
755 {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE
},
757 /* Qualifiers for miscellaneous purpose.
759 unused, unused and unused. */
764 {0, 0, 0, "retrieving", 0},
767 static inline bfd_boolean
768 operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier
)
770 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_OPD_VARIANT
)
774 static inline bfd_boolean
775 qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier
)
777 return (aarch64_opnd_qualifiers
[qualifier
].kind
== OQK_VALUE_IN_RANGE
)
782 aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier
)
784 return aarch64_opnd_qualifiers
[qualifier
].desc
;
787 /* Given an operand qualifier, return the expected data element size
788 of a qualified operand. */
790 aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier
)
792 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
793 return aarch64_opnd_qualifiers
[qualifier
].data0
;
797 aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier
)
799 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
800 return aarch64_opnd_qualifiers
[qualifier
].data1
;
804 aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier
)
806 assert (operand_variant_qualifier_p (qualifier
) == TRUE
);
807 return aarch64_opnd_qualifiers
[qualifier
].data2
;
811 get_lower_bound (aarch64_opnd_qualifier_t qualifier
)
813 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
814 return aarch64_opnd_qualifiers
[qualifier
].data0
;
818 get_upper_bound (aarch64_opnd_qualifier_t qualifier
)
820 assert (qualifier_value_in_range_constraint_p (qualifier
) == TRUE
);
821 return aarch64_opnd_qualifiers
[qualifier
].data1
;
826 aarch64_verbose (const char *str
, ...)
837 dump_qualifier_sequence (const aarch64_opnd_qualifier_t
*qualifier
)
841 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
, ++qualifier
)
842 printf ("%s,", aarch64_get_qualifier_name (*qualifier
));
847 dump_match_qualifiers (const struct aarch64_opnd_info
*opnd
,
848 const aarch64_opnd_qualifier_t
*qualifier
)
851 aarch64_opnd_qualifier_t curr
[AARCH64_MAX_OPND_NUM
];
853 aarch64_verbose ("dump_match_qualifiers:");
854 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
855 curr
[i
] = opnd
[i
].qualifier
;
856 dump_qualifier_sequence (curr
);
857 aarch64_verbose ("against");
858 dump_qualifier_sequence (qualifier
);
860 #endif /* DEBUG_AARCH64 */
862 /* This function checks if the given instruction INSN is a destructive
863 instruction based on the usage of the registers. It does not recognize
864 unary destructive instructions. */
866 aarch64_is_destructive_by_operands (const aarch64_opcode
*opcode
)
869 const enum aarch64_opnd
*opnds
= opcode
->operands
;
871 if (opnds
[0] == AARCH64_OPND_NIL
)
874 while (opnds
[++i
] != AARCH64_OPND_NIL
)
875 if (opnds
[i
] == opnds
[0])
881 /* TODO improve this, we can have an extra field at the runtime to
882 store the number of operands rather than calculating it every time. */
885 aarch64_num_of_operands (const aarch64_opcode
*opcode
)
888 const enum aarch64_opnd
*opnds
= opcode
->operands
;
889 while (opnds
[i
++] != AARCH64_OPND_NIL
)
892 assert (i
>= 0 && i
<= AARCH64_MAX_OPND_NUM
);
896 /* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
897 If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
899 N.B. on the entry, it is very likely that only some operands in *INST
900 have had their qualifiers been established.
902 If STOP_AT is not -1, the function will only try to match
903 the qualifier sequence for operands before and including the operand
904 of index STOP_AT; and on success *RET will only be filled with the first
905 (STOP_AT+1) qualifiers.
907 A couple examples of the matching algorithm:
915 Apart from serving the main encoding routine, this can also be called
916 during or after the operand decoding. */
919 aarch64_find_best_match (const aarch64_inst
*inst
,
920 const aarch64_opnd_qualifier_seq_t
*qualifiers_list
,
921 int stop_at
, aarch64_opnd_qualifier_t
*ret
)
925 const aarch64_opnd_qualifier_t
*qualifiers
;
927 num_opnds
= aarch64_num_of_operands (inst
->opcode
);
930 DEBUG_TRACE ("SUCCEED: no operand");
934 if (stop_at
< 0 || stop_at
>= num_opnds
)
935 stop_at
= num_opnds
- 1;
937 /* For each pattern. */
938 for (i
= 0; i
< AARCH64_MAX_QLF_SEQ_NUM
; ++i
, ++qualifiers_list
)
941 qualifiers
= *qualifiers_list
;
943 /* Start as positive. */
946 DEBUG_TRACE ("%d", i
);
949 dump_match_qualifiers (inst
->operands
, qualifiers
);
952 /* Most opcodes has much fewer patterns in the list.
953 First NIL qualifier indicates the end in the list. */
954 if (empty_qualifier_sequence_p (qualifiers
) == TRUE
)
956 DEBUG_TRACE_IF (i
== 0, "SUCCEED: empty qualifier list");
962 for (j
= 0; j
< num_opnds
&& j
<= stop_at
; ++j
, ++qualifiers
)
964 if (inst
->operands
[j
].qualifier
== AARCH64_OPND_QLF_NIL
)
966 /* Either the operand does not have qualifier, or the qualifier
967 for the operand needs to be deduced from the qualifier
969 In the latter case, any constraint checking related with
970 the obtained qualifier should be done later in
971 operand_general_constraint_met_p. */
974 else if (*qualifiers
!= inst
->operands
[j
].qualifier
)
976 /* Unless the target qualifier can also qualify the operand
977 (which has already had a non-nil qualifier), non-equal
978 qualifiers are generally un-matched. */
979 if (operand_also_qualified_p (inst
->operands
+ j
, *qualifiers
))
988 continue; /* Equal qualifiers are certainly matched. */
991 /* Qualifiers established. */
998 /* Fill the result in *RET. */
1000 qualifiers
= *qualifiers_list
;
1002 DEBUG_TRACE ("complete qualifiers using list %d", i
);
1003 #ifdef DEBUG_AARCH64
1005 dump_qualifier_sequence (qualifiers
);
1008 for (j
= 0; j
<= stop_at
; ++j
, ++qualifiers
)
1009 ret
[j
] = *qualifiers
;
1010 for (; j
< AARCH64_MAX_OPND_NUM
; ++j
)
1011 ret
[j
] = AARCH64_OPND_QLF_NIL
;
1013 DEBUG_TRACE ("SUCCESS");
1017 DEBUG_TRACE ("FAIL");
1021 /* Operand qualifier matching and resolving.
1023 Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1024 sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1026 if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1030 match_operands_qualifier (aarch64_inst
*inst
, bfd_boolean update_p
)
1033 aarch64_opnd_qualifier_seq_t qualifiers
;
1035 if (!aarch64_find_best_match (inst
, inst
->opcode
->qualifiers_list
, -1,
1038 DEBUG_TRACE ("matching FAIL");
1042 if (inst
->opcode
->flags
& F_STRICT
)
1044 /* Require an exact qualifier match, even for NIL qualifiers. */
1045 nops
= aarch64_num_of_operands (inst
->opcode
);
1046 for (i
= 0; i
< nops
; ++i
)
1047 if (inst
->operands
[i
].qualifier
!= qualifiers
[i
])
1051 /* Update the qualifiers. */
1052 if (update_p
== TRUE
)
1053 for (i
= 0; i
< AARCH64_MAX_OPND_NUM
; ++i
)
1055 if (inst
->opcode
->operands
[i
] == AARCH64_OPND_NIL
)
1057 DEBUG_TRACE_IF (inst
->operands
[i
].qualifier
!= qualifiers
[i
],
1058 "update %s with %s for operand %d",
1059 aarch64_get_qualifier_name (inst
->operands
[i
].qualifier
),
1060 aarch64_get_qualifier_name (qualifiers
[i
]), i
);
1061 inst
->operands
[i
].qualifier
= qualifiers
[i
];
1064 DEBUG_TRACE ("matching SUCCESS");
1068 /* Return TRUE if VALUE is a wide constant that can be moved into a general
1071 IS32 indicates whether value is a 32-bit immediate or not.
1072 If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1073 amount will be returned in *SHIFT_AMOUNT. */
1076 aarch64_wide_constant_p (uint64_t value
, int is32
, unsigned int *shift_amount
)
1080 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1084 /* Allow all zeros or all ones in top 32-bits, so that
1085 32-bit constant expressions like ~0x80000000 are
1087 if (value
>> 32 != 0 && value
>> 32 != 0xffffffff)
1088 /* Immediate out of range. */
1090 value
&= 0xffffffff;
1093 /* first, try movz then movn */
1095 if ((value
& ((uint64_t) 0xffff << 0)) == value
)
1097 else if ((value
& ((uint64_t) 0xffff << 16)) == value
)
1099 else if (!is32
&& (value
& ((uint64_t) 0xffff << 32)) == value
)
1101 else if (!is32
&& (value
& ((uint64_t) 0xffff << 48)) == value
)
1106 DEBUG_TRACE ("exit FALSE with 0x%" PRIx64
"(%" PRIi64
")", value
, value
);
1110 if (shift_amount
!= NULL
)
1111 *shift_amount
= amount
;
1113 DEBUG_TRACE ("exit TRUE with amount %d", amount
);
1118 /* Build the accepted values for immediate logical SIMD instructions.
1120 The standard encodings of the immediate value are:
1121 N imms immr SIMD size R S
1122 1 ssssss rrrrrr 64 UInt(rrrrrr) UInt(ssssss)
1123 0 0sssss 0rrrrr 32 UInt(rrrrr) UInt(sssss)
1124 0 10ssss 00rrrr 16 UInt(rrrr) UInt(ssss)
1125 0 110sss 000rrr 8 UInt(rrr) UInt(sss)
1126 0 1110ss 0000rr 4 UInt(rr) UInt(ss)
1127 0 11110s 00000r 2 UInt(r) UInt(s)
1128 where all-ones value of S is reserved.
1130 Let's call E the SIMD size.
1132 The immediate value is: S+1 bits '1' rotated to the right by R.
1134 The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1135 (remember S != E - 1). */
1137 #define TOTAL_IMM_NB 5334
1142 aarch64_insn encoding
;
1143 } simd_imm_encoding
;
1145 static simd_imm_encoding simd_immediates
[TOTAL_IMM_NB
];
1148 simd_imm_encoding_cmp(const void *i1
, const void *i2
)
1150 const simd_imm_encoding
*imm1
= (const simd_imm_encoding
*)i1
;
1151 const simd_imm_encoding
*imm2
= (const simd_imm_encoding
*)i2
;
1153 if (imm1
->imm
< imm2
->imm
)
1155 if (imm1
->imm
> imm2
->imm
)
1160 /* immediate bitfield standard encoding
1161 imm13<12> imm13<5:0> imm13<11:6> SIMD size R S
1162 1 ssssss rrrrrr 64 rrrrrr ssssss
1163 0 0sssss 0rrrrr 32 rrrrr sssss
1164 0 10ssss 00rrrr 16 rrrr ssss
1165 0 110sss 000rrr 8 rrr sss
1166 0 1110ss 0000rr 4 rr ss
1167 0 11110s 00000r 2 r s */
1169 encode_immediate_bitfield (int is64
, uint32_t s
, uint32_t r
)
1171 return (is64
<< 12) | (r
<< 6) | s
;
1175 build_immediate_table (void)
1177 uint32_t log_e
, e
, s
, r
, s_mask
;
1183 for (log_e
= 1; log_e
<= 6; log_e
++)
1185 /* Get element size. */
1190 mask
= 0xffffffffffffffffull
;
1196 mask
= (1ull << e
) - 1;
1198 1 ((1 << 4) - 1) << 2 = 111100
1199 2 ((1 << 3) - 1) << 3 = 111000
1200 3 ((1 << 2) - 1) << 4 = 110000
1201 4 ((1 << 1) - 1) << 5 = 100000
1202 5 ((1 << 0) - 1) << 6 = 000000 */
1203 s_mask
= ((1u << (5 - log_e
)) - 1) << (log_e
+ 1);
1205 for (s
= 0; s
< e
- 1; s
++)
1206 for (r
= 0; r
< e
; r
++)
1208 /* s+1 consecutive bits to 1 (s < 63) */
1209 imm
= (1ull << (s
+ 1)) - 1;
1210 /* rotate right by r */
1212 imm
= (imm
>> r
) | ((imm
<< (e
- r
)) & mask
);
1213 /* replicate the constant depending on SIMD size */
1216 case 1: imm
= (imm
<< 2) | imm
;
1218 case 2: imm
= (imm
<< 4) | imm
;
1220 case 3: imm
= (imm
<< 8) | imm
;
1222 case 4: imm
= (imm
<< 16) | imm
;
1224 case 5: imm
= (imm
<< 32) | imm
;
1229 simd_immediates
[nb_imms
].imm
= imm
;
1230 simd_immediates
[nb_imms
].encoding
=
1231 encode_immediate_bitfield(is64
, s
| s_mask
, r
);
1235 assert (nb_imms
== TOTAL_IMM_NB
);
1236 qsort(simd_immediates
, nb_imms
,
1237 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1240 /* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1241 be accepted by logical (immediate) instructions
1242 e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1244 ESIZE is the number of bytes in the decoded immediate value.
1245 If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1246 VALUE will be returned in *ENCODING. */
1249 aarch64_logical_immediate_p (uint64_t value
, int esize
, aarch64_insn
*encoding
)
1251 simd_imm_encoding imm_enc
;
1252 const simd_imm_encoding
*imm_encoding
;
1253 static bfd_boolean initialized
= FALSE
;
1257 DEBUG_TRACE ("enter with 0x%" PRIx64
"(%" PRIi64
"), esize: %d", value
,
1262 build_immediate_table ();
1266 /* Allow all zeros or all ones in top bits, so that
1267 constant expressions like ~1 are permitted. */
1268 upper
= (uint64_t) -1 << (esize
* 4) << (esize
* 4);
1269 if ((value
& ~upper
) != value
&& (value
| upper
) != value
)
1272 /* Replicate to a full 64-bit value. */
1274 for (i
= esize
* 8; i
< 64; i
*= 2)
1275 value
|= (value
<< i
);
1277 imm_enc
.imm
= value
;
1278 imm_encoding
= (const simd_imm_encoding
*)
1279 bsearch(&imm_enc
, simd_immediates
, TOTAL_IMM_NB
,
1280 sizeof(simd_immediates
[0]), simd_imm_encoding_cmp
);
1281 if (imm_encoding
== NULL
)
1283 DEBUG_TRACE ("exit with FALSE");
1286 if (encoding
!= NULL
)
1287 *encoding
= imm_encoding
->encoding
;
1288 DEBUG_TRACE ("exit with TRUE");
1292 /* If 64-bit immediate IMM is in the format of
1293 "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1294 where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1295 of value "abcdefgh". Otherwise return -1. */
1297 aarch64_shrink_expanded_imm8 (uint64_t imm
)
1303 for (i
= 0; i
< 8; i
++)
1305 byte
= (imm
>> (8 * i
)) & 0xff;
1308 else if (byte
!= 0x00)
1314 /* Utility inline functions for operand_general_constraint_met_p. */
1317 set_error (aarch64_operand_error
*mismatch_detail
,
1318 enum aarch64_operand_error_kind kind
, int idx
,
1321 if (mismatch_detail
== NULL
)
1323 mismatch_detail
->kind
= kind
;
1324 mismatch_detail
->index
= idx
;
1325 mismatch_detail
->error
= error
;
1329 set_syntax_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1332 if (mismatch_detail
== NULL
)
1334 set_error (mismatch_detail
, AARCH64_OPDE_SYNTAX_ERROR
, idx
, error
);
1338 set_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1339 int idx
, int lower_bound
, int upper_bound
,
1342 if (mismatch_detail
== NULL
)
1344 set_error (mismatch_detail
, AARCH64_OPDE_OUT_OF_RANGE
, idx
, error
);
1345 mismatch_detail
->data
[0] = lower_bound
;
1346 mismatch_detail
->data
[1] = upper_bound
;
1350 set_imm_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1351 int idx
, int lower_bound
, int upper_bound
)
1353 if (mismatch_detail
== NULL
)
1355 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1356 _("immediate value"));
1360 set_offset_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1361 int idx
, int lower_bound
, int upper_bound
)
1363 if (mismatch_detail
== NULL
)
1365 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1366 _("immediate offset"));
1370 set_regno_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1371 int idx
, int lower_bound
, int upper_bound
)
1373 if (mismatch_detail
== NULL
)
1375 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1376 _("register number"));
1380 set_elem_idx_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1381 int idx
, int lower_bound
, int upper_bound
)
1383 if (mismatch_detail
== NULL
)
1385 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1386 _("register element index"));
1390 set_sft_amount_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1391 int idx
, int lower_bound
, int upper_bound
)
1393 if (mismatch_detail
== NULL
)
1395 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1399 /* Report that the MUL modifier in operand IDX should be in the range
1400 [LOWER_BOUND, UPPER_BOUND]. */
1402 set_multiplier_out_of_range_error (aarch64_operand_error
*mismatch_detail
,
1403 int idx
, int lower_bound
, int upper_bound
)
1405 if (mismatch_detail
== NULL
)
1407 set_out_of_range_error (mismatch_detail
, idx
, lower_bound
, upper_bound
,
1412 set_unaligned_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1415 if (mismatch_detail
== NULL
)
1417 set_error (mismatch_detail
, AARCH64_OPDE_UNALIGNED
, idx
, NULL
);
1418 mismatch_detail
->data
[0] = alignment
;
1422 set_reg_list_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1425 if (mismatch_detail
== NULL
)
1427 set_error (mismatch_detail
, AARCH64_OPDE_REG_LIST
, idx
, NULL
);
1428 mismatch_detail
->data
[0] = expected_num
;
1432 set_other_error (aarch64_operand_error
*mismatch_detail
, int idx
,
1435 if (mismatch_detail
== NULL
)
1437 set_error (mismatch_detail
, AARCH64_OPDE_OTHER_ERROR
, idx
, error
);
1440 /* General constraint checking based on operand code.
1442 Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1443 as the IDXth operand of opcode OPCODE. Otherwise return 0.
1445 This function has to be called after the qualifiers for all operands
1448 Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1449 i.e. when MISMATCH_DETAIL is non-NULL. This avoids the generation
1450 of error message during the disassembling where error message is not
1451 wanted. We avoid the dynamic construction of strings of error messages
1452 here (i.e. in libopcodes), as it is costly and complicated; instead, we
1453 use a combination of error code, static string and some integer data to
1454 represent an error. */
1457 operand_general_constraint_met_p (const aarch64_opnd_info
*opnds
, int idx
,
1458 enum aarch64_opnd type
,
1459 const aarch64_opcode
*opcode
,
1460 aarch64_operand_error
*mismatch_detail
)
1462 unsigned num
, modifiers
, shift
;
1464 int64_t imm
, min_value
, max_value
;
1465 uint64_t uvalue
, mask
;
1466 const aarch64_opnd_info
*opnd
= opnds
+ idx
;
1467 aarch64_opnd_qualifier_t qualifier
= opnd
->qualifier
;
1469 assert (opcode
->operands
[idx
] == opnd
->type
&& opnd
->type
== type
);
1471 switch (aarch64_operands
[type
].op_class
)
1473 case AARCH64_OPND_CLASS_INT_REG
:
1474 /* Check pair reg constraints for cas* instructions. */
1475 if (type
== AARCH64_OPND_PAIRREG
)
1477 assert (idx
== 1 || idx
== 3);
1478 if (opnds
[idx
- 1].reg
.regno
% 2 != 0)
1480 set_syntax_error (mismatch_detail
, idx
- 1,
1481 _("reg pair must start from even reg"));
1484 if (opnds
[idx
].reg
.regno
!= opnds
[idx
- 1].reg
.regno
+ 1)
1486 set_syntax_error (mismatch_detail
, idx
,
1487 _("reg pair must be contiguous"));
1493 /* <Xt> may be optional in some IC and TLBI instructions. */
1494 if (type
== AARCH64_OPND_Rt_SYS
)
1496 assert (idx
== 1 && (aarch64_get_operand_class (opnds
[0].type
)
1497 == AARCH64_OPND_CLASS_SYSTEM
));
1498 if (opnds
[1].present
1499 && !aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1501 set_other_error (mismatch_detail
, idx
, _("extraneous register"));
1504 if (!opnds
[1].present
1505 && aarch64_sys_ins_reg_has_xt (opnds
[0].sysins_op
))
1507 set_other_error (mismatch_detail
, idx
, _("missing register"));
1513 case AARCH64_OPND_QLF_WSP
:
1514 case AARCH64_OPND_QLF_SP
:
1515 if (!aarch64_stack_pointer_p (opnd
))
1517 set_other_error (mismatch_detail
, idx
,
1518 _("stack pointer register expected"));
1527 case AARCH64_OPND_CLASS_SVE_REG
:
1530 case AARCH64_OPND_SVE_Zm3_INDEX
:
1531 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
1532 case AARCH64_OPND_SVE_Zm3_11_INDEX
:
1533 case AARCH64_OPND_SVE_Zm4_11_INDEX
:
1534 case AARCH64_OPND_SVE_Zm4_INDEX
:
1535 size
= get_operand_fields_width (get_operand_from_code (type
));
1536 shift
= get_operand_specific_data (&aarch64_operands
[type
]);
1537 mask
= (1 << shift
) - 1;
1538 if (opnd
->reg
.regno
> mask
)
1540 assert (mask
== 7 || mask
== 15);
1541 set_other_error (mismatch_detail
, idx
,
1543 ? _("z0-z15 expected")
1544 : _("z0-z7 expected"));
1547 mask
= (1u << (size
- shift
)) - 1;
1548 if (!value_in_range_p (opnd
->reglane
.index
, 0, mask
))
1550 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, mask
);
1555 case AARCH64_OPND_SVE_Zn_INDEX
:
1556 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1557 if (!value_in_range_p (opnd
->reglane
.index
, 0, 64 / size
- 1))
1559 set_elem_idx_out_of_range_error (mismatch_detail
, idx
,
1565 case AARCH64_OPND_SVE_ZnxN
:
1566 case AARCH64_OPND_SVE_ZtxN
:
1567 if (opnd
->reglist
.num_regs
!= get_opcode_dependent_value (opcode
))
1569 set_other_error (mismatch_detail
, idx
,
1570 _("invalid register list"));
1580 case AARCH64_OPND_CLASS_PRED_REG
:
1581 if (opnd
->reg
.regno
>= 8
1582 && get_operand_fields_width (get_operand_from_code (type
)) == 3)
1584 set_other_error (mismatch_detail
, idx
, _("p0-p7 expected"));
1589 case AARCH64_OPND_CLASS_COND
:
1590 if (type
== AARCH64_OPND_COND1
1591 && (opnds
[idx
].cond
->value
& 0xe) == 0xe)
1593 /* Not allow AL or NV. */
1594 set_syntax_error (mismatch_detail
, idx
, NULL
);
1598 case AARCH64_OPND_CLASS_ADDRESS
:
1599 /* Check writeback. */
1600 switch (opcode
->iclass
)
1604 case ldstnapair_offs
:
1607 if (opnd
->addr
.writeback
== 1)
1609 set_syntax_error (mismatch_detail
, idx
,
1610 _("unexpected address writeback"));
1615 if (opnd
->addr
.writeback
== 1 && opnd
->addr
.preind
!= 1)
1617 set_syntax_error (mismatch_detail
, idx
,
1618 _("unexpected address writeback"));
1623 case ldstpair_indexed
:
1626 if (opnd
->addr
.writeback
== 0)
1628 set_syntax_error (mismatch_detail
, idx
,
1629 _("address writeback expected"));
1634 assert (opnd
->addr
.writeback
== 0);
1639 case AARCH64_OPND_ADDR_SIMM7
:
1640 /* Scaled signed 7 bits immediate offset. */
1641 /* Get the size of the data element that is accessed, which may be
1642 different from that of the source register size,
1643 e.g. in strb/ldrb. */
1644 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1645 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -64 * size
, 63 * size
))
1647 set_offset_out_of_range_error (mismatch_detail
, idx
,
1648 -64 * size
, 63 * size
);
1651 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1653 set_unaligned_error (mismatch_detail
, idx
, size
);
1657 case AARCH64_OPND_ADDR_OFFSET
:
1658 case AARCH64_OPND_ADDR_SIMM9
:
1659 /* Unscaled signed 9 bits immediate offset. */
1660 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -256, 255))
1662 set_offset_out_of_range_error (mismatch_detail
, idx
, -256, 255);
1667 case AARCH64_OPND_ADDR_SIMM9_2
:
1668 /* Unscaled signed 9 bits immediate offset, which has to be negative
1670 size
= aarch64_get_qualifier_esize (qualifier
);
1671 if ((value_in_range_p (opnd
->addr
.offset
.imm
, 0, 255)
1672 && !value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1673 || value_in_range_p (opnd
->addr
.offset
.imm
, -256, -1))
1675 set_other_error (mismatch_detail
, idx
,
1676 _("negative or unaligned offset expected"));
1679 case AARCH64_OPND_ADDR_SIMM10
:
1680 /* Scaled signed 10 bits immediate offset. */
1681 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4088))
1683 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4088);
1686 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 8))
1688 set_unaligned_error (mismatch_detail
, idx
, 8);
1693 case AARCH64_OPND_ADDR_SIMM11
:
1694 /* Signed 11 bits immediate offset (multiple of 16). */
1695 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -1024, 1008))
1697 set_offset_out_of_range_error (mismatch_detail
, idx
, -1024, 1008);
1701 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 16))
1703 set_unaligned_error (mismatch_detail
, idx
, 16);
1708 case AARCH64_OPND_ADDR_SIMM13
:
1709 /* Signed 13 bits immediate offset (multiple of 16). */
1710 if (!value_in_range_p (opnd
->addr
.offset
.imm
, -4096, 4080))
1712 set_offset_out_of_range_error (mismatch_detail
, idx
, -4096, 4080);
1716 if (!value_aligned_p (opnd
->addr
.offset
.imm
, 16))
1718 set_unaligned_error (mismatch_detail
, idx
, 16);
1723 case AARCH64_OPND_SIMD_ADDR_POST
:
1724 /* AdvSIMD load/store multiple structures, post-index. */
1726 if (opnd
->addr
.offset
.is_reg
)
1728 if (value_in_range_p (opnd
->addr
.offset
.regno
, 0, 30))
1732 set_other_error (mismatch_detail
, idx
,
1733 _("invalid register offset"));
1739 const aarch64_opnd_info
*prev
= &opnds
[idx
-1];
1740 unsigned num_bytes
; /* total number of bytes transferred. */
1741 /* The opcode dependent area stores the number of elements in
1742 each structure to be loaded/stored. */
1743 int is_ld1r
= get_opcode_dependent_value (opcode
) == 1;
1744 if (opcode
->operands
[0] == AARCH64_OPND_LVt_AL
)
1745 /* Special handling of loading single structure to all lane. */
1746 num_bytes
= (is_ld1r
? 1 : prev
->reglist
.num_regs
)
1747 * aarch64_get_qualifier_esize (prev
->qualifier
);
1749 num_bytes
= prev
->reglist
.num_regs
1750 * aarch64_get_qualifier_esize (prev
->qualifier
)
1751 * aarch64_get_qualifier_nelem (prev
->qualifier
);
1752 if ((int) num_bytes
!= opnd
->addr
.offset
.imm
)
1754 set_other_error (mismatch_detail
, idx
,
1755 _("invalid post-increment amount"));
1761 case AARCH64_OPND_ADDR_REGOFF
:
1762 /* Get the size of the data element that is accessed, which may be
1763 different from that of the source register size,
1764 e.g. in strb/ldrb. */
1765 size
= aarch64_get_qualifier_esize (opnd
->qualifier
);
1766 /* It is either no shift or shift by the binary logarithm of SIZE. */
1767 if (opnd
->shifter
.amount
!= 0
1768 && opnd
->shifter
.amount
!= (int)get_logsz (size
))
1770 set_other_error (mismatch_detail
, idx
,
1771 _("invalid shift amount"));
1774 /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1776 switch (opnd
->shifter
.kind
)
1778 case AARCH64_MOD_UXTW
:
1779 case AARCH64_MOD_LSL
:
1780 case AARCH64_MOD_SXTW
:
1781 case AARCH64_MOD_SXTX
: break;
1783 set_other_error (mismatch_detail
, idx
,
1784 _("invalid extend/shift operator"));
1789 case AARCH64_OPND_ADDR_UIMM12
:
1790 imm
= opnd
->addr
.offset
.imm
;
1791 /* Get the size of the data element that is accessed, which may be
1792 different from that of the source register size,
1793 e.g. in strb/ldrb. */
1794 size
= aarch64_get_qualifier_esize (qualifier
);
1795 if (!value_in_range_p (opnd
->addr
.offset
.imm
, 0, 4095 * size
))
1797 set_offset_out_of_range_error (mismatch_detail
, idx
,
1801 if (!value_aligned_p (opnd
->addr
.offset
.imm
, size
))
1803 set_unaligned_error (mismatch_detail
, idx
, size
);
1808 case AARCH64_OPND_ADDR_PCREL14
:
1809 case AARCH64_OPND_ADDR_PCREL19
:
1810 case AARCH64_OPND_ADDR_PCREL21
:
1811 case AARCH64_OPND_ADDR_PCREL26
:
1812 imm
= opnd
->imm
.value
;
1813 if (operand_need_shift_by_two (get_operand_from_code (type
)))
1815 /* The offset value in a PC-relative branch instruction is alway
1816 4-byte aligned and is encoded without the lowest 2 bits. */
1817 if (!value_aligned_p (imm
, 4))
1819 set_unaligned_error (mismatch_detail
, idx
, 4);
1822 /* Right shift by 2 so that we can carry out the following check
1826 size
= get_operand_fields_width (get_operand_from_code (type
));
1827 if (!value_fit_signed_field_p (imm
, size
))
1829 set_other_error (mismatch_detail
, idx
,
1830 _("immediate out of range"));
1835 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
1836 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
1837 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
1838 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
1842 assert (!opnd
->addr
.offset
.is_reg
);
1843 assert (opnd
->addr
.preind
);
1844 num
= 1 + get_operand_specific_data (&aarch64_operands
[type
]);
1847 if ((opnd
->addr
.offset
.imm
!= 0 && !opnd
->shifter
.operator_present
)
1848 || (opnd
->shifter
.operator_present
1849 && opnd
->shifter
.kind
!= AARCH64_MOD_MUL_VL
))
1851 set_other_error (mismatch_detail
, idx
,
1852 _("invalid addressing mode"));
1855 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1857 set_offset_out_of_range_error (mismatch_detail
, idx
,
1858 min_value
, max_value
);
1861 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1863 set_unaligned_error (mismatch_detail
, idx
, num
);
1868 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
1871 goto sve_imm_offset_vl
;
1873 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
1876 goto sve_imm_offset_vl
;
1878 case AARCH64_OPND_SVE_ADDR_RI_U6
:
1879 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
1880 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
1881 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
1885 assert (!opnd
->addr
.offset
.is_reg
);
1886 assert (opnd
->addr
.preind
);
1887 num
= 1 << get_operand_specific_data (&aarch64_operands
[type
]);
1890 if (opnd
->shifter
.operator_present
1891 || opnd
->shifter
.amount_present
)
1893 set_other_error (mismatch_detail
, idx
,
1894 _("invalid addressing mode"));
1897 if (!value_in_range_p (opnd
->addr
.offset
.imm
, min_value
, max_value
))
1899 set_offset_out_of_range_error (mismatch_detail
, idx
,
1900 min_value
, max_value
);
1903 if (!value_aligned_p (opnd
->addr
.offset
.imm
, num
))
1905 set_unaligned_error (mismatch_detail
, idx
, num
);
1910 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
1911 case AARCH64_OPND_SVE_ADDR_RI_S4x32
:
1914 goto sve_imm_offset
;
1916 case AARCH64_OPND_SVE_ADDR_ZX
:
1917 /* Everything is already ensured by parse_operands or
1918 aarch64_ext_sve_addr_rr_lsl (because this is a very specific
1920 assert (opnd
->addr
.offset
.is_reg
);
1921 assert (opnd
->addr
.preind
);
1922 assert ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) == 0);
1923 assert (opnd
->shifter
.kind
== AARCH64_MOD_LSL
);
1924 assert (opnd
->shifter
.operator_present
== 0);
1927 case AARCH64_OPND_SVE_ADDR_R
:
1928 case AARCH64_OPND_SVE_ADDR_RR
:
1929 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
1930 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
1931 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
1932 case AARCH64_OPND_SVE_ADDR_RX
:
1933 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
1934 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
1935 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
1936 case AARCH64_OPND_SVE_ADDR_RZ
:
1937 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
1938 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
1939 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
1940 modifiers
= 1 << AARCH64_MOD_LSL
;
1942 assert (opnd
->addr
.offset
.is_reg
);
1943 assert (opnd
->addr
.preind
);
1944 if ((aarch64_operands
[type
].flags
& OPD_F_NO_ZR
) != 0
1945 && opnd
->addr
.offset
.regno
== 31)
1947 set_other_error (mismatch_detail
, idx
,
1948 _("index register xzr is not allowed"));
1951 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1952 || (opnd
->shifter
.amount
1953 != get_operand_specific_data (&aarch64_operands
[type
])))
1955 set_other_error (mismatch_detail
, idx
,
1956 _("invalid addressing mode"));
1961 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
1962 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
1963 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
1964 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
1965 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
1966 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
1967 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
1968 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
1969 modifiers
= (1 << AARCH64_MOD_SXTW
) | (1 << AARCH64_MOD_UXTW
);
1970 goto sve_rr_operand
;
1972 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
1973 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
1974 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
1975 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
1978 goto sve_imm_offset
;
1980 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
1981 modifiers
= 1 << AARCH64_MOD_LSL
;
1983 assert (opnd
->addr
.offset
.is_reg
);
1984 assert (opnd
->addr
.preind
);
1985 if (((1 << opnd
->shifter
.kind
) & modifiers
) == 0
1986 || opnd
->shifter
.amount
< 0
1987 || opnd
->shifter
.amount
> 3)
1989 set_other_error (mismatch_detail
, idx
,
1990 _("invalid addressing mode"));
1995 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
1996 modifiers
= (1 << AARCH64_MOD_SXTW
);
1997 goto sve_zz_operand
;
1999 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
2000 modifiers
= 1 << AARCH64_MOD_UXTW
;
2001 goto sve_zz_operand
;
2008 case AARCH64_OPND_CLASS_SIMD_REGLIST
:
2009 if (type
== AARCH64_OPND_LEt
)
2011 /* Get the upper bound for the element index. */
2012 num
= 16 / aarch64_get_qualifier_esize (qualifier
) - 1;
2013 if (!value_in_range_p (opnd
->reglist
.index
, 0, num
))
2015 set_elem_idx_out_of_range_error (mismatch_detail
, idx
, 0, num
);
2019 /* The opcode dependent area stores the number of elements in
2020 each structure to be loaded/stored. */
2021 num
= get_opcode_dependent_value (opcode
);
2024 case AARCH64_OPND_LVt
:
2025 assert (num
>= 1 && num
<= 4);
2026 /* Unless LD1/ST1, the number of registers should be equal to that
2027 of the structure elements. */
2028 if (num
!= 1 && opnd
->reglist
.num_regs
!= num
)
2030 set_reg_list_error (mismatch_detail
, idx
, num
);
2034 case AARCH64_OPND_LVt_AL
:
2035 case AARCH64_OPND_LEt
:
2036 assert (num
>= 1 && num
<= 4);
2037 /* The number of registers should be equal to that of the structure
2039 if (opnd
->reglist
.num_regs
!= num
)
2041 set_reg_list_error (mismatch_detail
, idx
, num
);
2050 case AARCH64_OPND_CLASS_IMMEDIATE
:
2051 /* Constraint check on immediate operand. */
2052 imm
= opnd
->imm
.value
;
2053 /* E.g. imm_0_31 constrains value to be 0..31. */
2054 if (qualifier_value_in_range_constraint_p (qualifier
)
2055 && !value_in_range_p (imm
, get_lower_bound (qualifier
),
2056 get_upper_bound (qualifier
)))
2058 set_imm_out_of_range_error (mismatch_detail
, idx
,
2059 get_lower_bound (qualifier
),
2060 get_upper_bound (qualifier
));
2066 case AARCH64_OPND_AIMM
:
2067 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2069 set_other_error (mismatch_detail
, idx
,
2070 _("invalid shift operator"));
2073 if (opnd
->shifter
.amount
!= 0 && opnd
->shifter
.amount
!= 12)
2075 set_other_error (mismatch_detail
, idx
,
2076 _("shift amount must be 0 or 12"));
2079 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 12))
2081 set_other_error (mismatch_detail
, idx
,
2082 _("immediate out of range"));
2087 case AARCH64_OPND_HALF
:
2088 assert (idx
== 1 && opnds
[0].type
== AARCH64_OPND_Rd
);
2089 if (opnd
->shifter
.kind
!= AARCH64_MOD_LSL
)
2091 set_other_error (mismatch_detail
, idx
,
2092 _("invalid shift operator"));
2095 size
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2096 if (!value_aligned_p (opnd
->shifter
.amount
, 16))
2098 set_other_error (mismatch_detail
, idx
,
2099 _("shift amount must be a multiple of 16"));
2102 if (!value_in_range_p (opnd
->shifter
.amount
, 0, size
* 8 - 16))
2104 set_sft_amount_out_of_range_error (mismatch_detail
, idx
,
2108 if (opnd
->imm
.value
< 0)
2110 set_other_error (mismatch_detail
, idx
,
2111 _("negative immediate value not allowed"));
2114 if (!value_fit_unsigned_field_p (opnd
->imm
.value
, 16))
2116 set_other_error (mismatch_detail
, idx
,
2117 _("immediate out of range"));
2122 case AARCH64_OPND_IMM_MOV
:
2124 int esize
= aarch64_get_qualifier_esize (opnds
[0].qualifier
);
2125 imm
= opnd
->imm
.value
;
2129 case OP_MOV_IMM_WIDEN
:
2132 case OP_MOV_IMM_WIDE
:
2133 if (!aarch64_wide_constant_p (imm
, esize
== 4, NULL
))
2135 set_other_error (mismatch_detail
, idx
,
2136 _("immediate out of range"));
2140 case OP_MOV_IMM_LOG
:
2141 if (!aarch64_logical_immediate_p (imm
, esize
, NULL
))
2143 set_other_error (mismatch_detail
, idx
,
2144 _("immediate out of range"));
    case AARCH64_OPND_NZCV:
    case AARCH64_OPND_CCMP_IMM:
    case AARCH64_OPND_EXCEPTION:
    case AARCH64_OPND_UNDEFINED:
    case AARCH64_OPND_TME_UIMM16:
    case AARCH64_OPND_UIMM4:
    case AARCH64_OPND_UIMM4_ADDG:
    case AARCH64_OPND_UIMM7:
    case AARCH64_OPND_UIMM3_OP1:
    case AARCH64_OPND_UIMM3_OP2:
    case AARCH64_OPND_SVE_UIMM3:
    case AARCH64_OPND_SVE_UIMM7:
    case AARCH64_OPND_SVE_UIMM8:
    case AARCH64_OPND_SVE_UIMM8_53:
      size = get_operand_fields_width (get_operand_from_code (type));
      if (!value_fit_unsigned_field_p (opnd->imm.value, size))
	set_imm_out_of_range_error (mismatch_detail, idx, 0,
				    (1 << size) - 1);

    case AARCH64_OPND_UIMM10:
      /* Scaled unsigned 10-bit immediate offset.  */
      if (!value_in_range_p (opnd->imm.value, 0, 1008))
	set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
      if (!value_aligned_p (opnd->imm.value, 16))
	set_unaligned_error (mismatch_detail, idx, 16);
    case AARCH64_OPND_SIMM5:
    case AARCH64_OPND_SVE_SIMM5:
    case AARCH64_OPND_SVE_SIMM5B:
    case AARCH64_OPND_SVE_SIMM6:
    case AARCH64_OPND_SVE_SIMM8:
      size = get_operand_fields_width (get_operand_from_code (type));
      if (!value_fit_signed_field_p (opnd->imm.value, size))
	set_imm_out_of_range_error (mismatch_detail, idx,
				    -(1 << (size - 1)),
				    (1 << (size - 1)) - 1);

    case AARCH64_OPND_WIDTH:
      assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
	      && opnds[0].type == AARCH64_OPND_Rd);
      size = get_upper_bound (qualifier);
      if (opnd->imm.value + opnds[idx-1].imm.value > size)
	/* lsb+width <= reg.size  */
	set_imm_out_of_range_error (mismatch_detail, idx, 1,
				    size - opnds[idx-1].imm.value);
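      /* Worked example (added, not in the original source): the upper
	 bound here is the register size, so for a bitfield extract such
	 as "UBFX Wd, Wn, #20, #<width>" only <width> values from 1 to 12
	 are accepted; <lsb> + <width> may not exceed 32.  */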
    case AARCH64_OPND_LIMM:
    case AARCH64_OPND_SVE_LIMM:
      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
      uint64_t uimm = opnd->imm.value;
      if (opcode->op == OP_BIC)
	uimm = ~uimm;
      if (!aarch64_logical_immediate_p (uimm, esize, NULL))
	set_other_error (mismatch_detail, idx,
			 _("immediate out of range"));

    case AARCH64_OPND_IMM0:
    case AARCH64_OPND_FPIMM0:
      if (opnd->imm.value != 0)
	set_other_error (mismatch_detail, idx,
			 _("immediate zero expected"));
    case AARCH64_OPND_IMM_ROT1:
    case AARCH64_OPND_IMM_ROT2:
    case AARCH64_OPND_SVE_IMM_ROT2:
      if (opnd->imm.value != 0
	  && opnd->imm.value != 90
	  && opnd->imm.value != 180
	  && opnd->imm.value != 270)
	set_other_error (mismatch_detail, idx,
			 _("rotate expected to be 0, 90, 180 or 270"));

    case AARCH64_OPND_IMM_ROT3:
    case AARCH64_OPND_SVE_IMM_ROT1:
    case AARCH64_OPND_SVE_IMM_ROT3:
      if (opnd->imm.value != 90 && opnd->imm.value != 270)
	set_other_error (mismatch_detail, idx,
			 _("rotate expected to be 90 or 270"));

    case AARCH64_OPND_SHLL_IMM:
      size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
      if (opnd->imm.value != size)
	set_other_error (mismatch_detail, idx,
			 _("invalid shift amount"));
    case AARCH64_OPND_IMM_VLSL:
      size = aarch64_get_qualifier_esize (qualifier);
      if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
	set_imm_out_of_range_error (mismatch_detail, idx, 0,
				    size * 8 - 1);

    case AARCH64_OPND_IMM_VLSR:
      size = aarch64_get_qualifier_esize (qualifier);
      if (!value_in_range_p (opnd->imm.value, 1, size * 8))
	set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
    case AARCH64_OPND_SIMD_IMM:
    case AARCH64_OPND_SIMD_IMM_SFT:
      /* Qualifier check.  */
      case AARCH64_OPND_QLF_LSL:
	if (opnd->shifter.kind != AARCH64_MOD_LSL)
	  set_other_error (mismatch_detail, idx,
			   _("invalid shift operator"));
      case AARCH64_OPND_QLF_MSL:
	if (opnd->shifter.kind != AARCH64_MOD_MSL)
	  set_other_error (mismatch_detail, idx,
			   _("invalid shift operator"));
      case AARCH64_OPND_QLF_NIL:
	if (opnd->shifter.kind != AARCH64_MOD_NONE)
	  set_other_error (mismatch_detail, idx,
			   _("shift is not permitted"));

      /* Is the immediate valid?  */
      if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
	{
	  /* uimm8 or simm8.  */
	  if (!value_in_range_p (opnd->imm.value, -128, 255))
	    set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
	}
      else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
	/* uimm64 is not
	   'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
	   ffffffffgggggggghhhhhhhh'.  */
	set_other_error (mismatch_detail, idx,
			 _("invalid value for immediate"));

      /* Is the shift amount valid?  */
      switch (opnd->shifter.kind)
	case AARCH64_MOD_LSL:
	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
	  if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
	    set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
					       (size - 1) * 8);
	  if (!value_aligned_p (opnd->shifter.amount, 8))
	    set_unaligned_error (mismatch_detail, idx, 8);
	case AARCH64_MOD_MSL:
	  /* Only 8 and 16 are valid shift amounts.  */
	  if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
	    set_other_error (mismatch_detail, idx,
			     _("shift amount must be 8 or 16"));
	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
	    set_other_error (mismatch_detail, idx,
			     _("invalid shift operator"));
    case AARCH64_OPND_FPIMM:
    case AARCH64_OPND_SIMD_FPIMM:
    case AARCH64_OPND_SVE_FPIMM8:
      if (opnd->imm.is_fp == 0)
	set_other_error (mismatch_detail, idx,
			 _("floating-point immediate expected"));
      /* The value is expected to be an 8-bit floating-point constant with
	 sign, 3-bit exponent and normalized 4 bits of precision, encoded
	 in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
	 instruction).  */
      if (!value_in_range_p (opnd->imm.value, 0, 255))
	set_other_error (mismatch_detail, idx,
			 _("immediate out of range"));
      if (opnd->shifter.kind != AARCH64_MOD_NONE)
	set_other_error (mismatch_detail, idx,
			 _("invalid shift operator"));
    case AARCH64_OPND_SVE_AIMM:
      assert (opnd->shifter.kind == AARCH64_MOD_LSL);
      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
      mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
      uvalue = opnd->imm.value;
      shift = opnd->shifter.amount;
      set_other_error (mismatch_detail, idx,
		       _("no shift amount allowed for"
			 " 8-bit constants"));
      if (shift != 0 && shift != 8)
	set_other_error (mismatch_detail, idx,
			 _("shift amount must be 0 or 8"));
      if (shift == 0 && (uvalue & 0xff) == 0)
	uvalue = (int64_t) uvalue / 256;
      if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
	set_other_error (mismatch_detail, idx,
			 _("immediate too big for element size"));
      uvalue = (uvalue - min_value) & mask;
      set_other_error (mismatch_detail, idx,
		       _("invalid arithmetic immediate"));
    case AARCH64_OPND_SVE_ASIMM:

    case AARCH64_OPND_SVE_I1_HALF_ONE:
      assert (opnd->imm.is_fp);
      if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
	set_other_error (mismatch_detail, idx,
			 _("floating-point value must be 0.5 or 1.0"));

    case AARCH64_OPND_SVE_I1_HALF_TWO:
      assert (opnd->imm.is_fp);
      if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
	set_other_error (mismatch_detail, idx,
			 _("floating-point value must be 0.5 or 2.0"));

    case AARCH64_OPND_SVE_I1_ZERO_ONE:
      assert (opnd->imm.is_fp);
      if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
	set_other_error (mismatch_detail, idx,
			 _("floating-point value must be 0.0 or 1.0"));
    case AARCH64_OPND_SVE_INV_LIMM:
      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
      uint64_t uimm = ~opnd->imm.value;
      if (!aarch64_logical_immediate_p (uimm, esize, NULL))
	set_other_error (mismatch_detail, idx,
			 _("immediate out of range"));

    case AARCH64_OPND_SVE_LIMM_MOV:
      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
      uint64_t uimm = opnd->imm.value;
      if (!aarch64_logical_immediate_p (uimm, esize, NULL))
	set_other_error (mismatch_detail, idx,
			 _("immediate out of range"));
      if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
	set_other_error (mismatch_detail, idx,
			 _("invalid replicated MOV immediate"));
    case AARCH64_OPND_SVE_PATTERN_SCALED:
      assert (opnd->shifter.kind == AARCH64_MOD_MUL);
      if (!value_in_range_p (opnd->shifter.amount, 1, 16))
	set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);

    case AARCH64_OPND_SVE_SHLIMM_PRED:
    case AARCH64_OPND_SVE_SHLIMM_UNPRED:
    case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
      size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
      if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
	set_imm_out_of_range_error (mismatch_detail, idx,
				    0, 8 * size - 1);

    case AARCH64_OPND_SVE_SHRIMM_PRED:
    case AARCH64_OPND_SVE_SHRIMM_UNPRED:
    case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
      num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
      size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
      if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
	set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
    case AARCH64_OPND_CLASS_SYSTEM:
      case AARCH64_OPND_PSTATEFIELD:
	assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
	/* The immediate must be #0 or #1.  */
	if ((opnd->pstatefield == 0x03	  /* UAO.  */
	     || opnd->pstatefield == 0x04 /* PAN.  */
	     || opnd->pstatefield == 0x19 /* SSBS.  */
	     || opnd->pstatefield == 0x1a) /* DIT.  */
	    && opnds[1].imm.value > 1)
	  set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
	/* MSR SPSel, #uimm4
	   Uses uimm4 as a control value to select the stack pointer: if
	   bit 0 is set it selects the current exception level's stack
	   pointer, if bit 0 is clear it selects shared EL0 stack pointer.
	   Bits 1 to 3 of uimm4 are reserved and should be zero.  */
	if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
	  set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
    case AARCH64_OPND_CLASS_SIMD_ELEMENT:
      /* Get the upper bound for the element index.  */
      if (opcode->op == OP_FCMLA_ELEM)
	/* FCMLA index range depends on the vector size of other operands
	   and is halved because complex numbers take two elements.  */
	num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
	      * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
      num = num / aarch64_get_qualifier_esize (qualifier) - 1;
      assert (aarch64_get_qualifier_nelem (qualifier) == 1);

      /* Index out-of-range.  */
      if (!value_in_range_p (opnd->reglane.index, 0, num))
	set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
      /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
	 <Vm> Is the vector register (V0-V31) or (V0-V15), whose
	 number is encoded in "size:M:Rm".  */
      if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
	set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
    case AARCH64_OPND_CLASS_MODIFIED_REG:
      assert (idx == 1 || idx == 2);

      case AARCH64_OPND_Rm_EXT:
	if (!aarch64_extend_operator_p (opnd->shifter.kind)
	    && opnd->shifter.kind != AARCH64_MOD_LSL)
	  set_other_error (mismatch_detail, idx,
			   _("extend operator expected"));
	/* It is not optional unless at least one of "Rd" or "Rn" is '11111'
	   (i.e. SP), in which case it defaults to LSL.  The LSL alias is
	   only valid when "Rd" or "Rn" is '11111', and is preferred in that
	   case.  */
	if (!aarch64_stack_pointer_p (opnds + 0)
	    && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
	  {
	    if (!opnd->shifter.operator_present)
	      set_other_error (mismatch_detail, idx,
			       _("missing extend operator"));
	    else if (opnd->shifter.kind == AARCH64_MOD_LSL)
	      set_other_error (mismatch_detail, idx,
			       _("'LSL' operator not allowed"));
	  }
	assert (opnd->shifter.operator_present	/* Default to LSL.  */
		|| opnd->shifter.kind == AARCH64_MOD_LSL);
	if (!value_in_range_p (opnd->shifter.amount, 0, 4))
	  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
	/* In the 64-bit form, the final register operand is written as Wm
	   for all but the (possibly omitted) UXTX/LSL and SXTX
	   operators.
	   N.B. GAS allows X register to be used with any operator as a
	   programming convenience.  */
	if (qualifier == AARCH64_OPND_QLF_X
	    && opnd->shifter.kind != AARCH64_MOD_LSL
	    && opnd->shifter.kind != AARCH64_MOD_UXTX
	    && opnd->shifter.kind != AARCH64_MOD_SXTX)
	  set_other_error (mismatch_detail, idx, _("W register expected"));
      case AARCH64_OPND_Rm_SFT:
	/* ROR is not available to the shifted register operand in
	   arithmetic instructions.  */
	if (!aarch64_shift_operator_p (opnd->shifter.kind))
	  set_other_error (mismatch_detail, idx,
			   _("shift operator expected"));
	if (opnd->shifter.kind == AARCH64_MOD_ROR
	    && opcode->iclass != log_shift)
	  set_other_error (mismatch_detail, idx,
			   _("'ROR' operator not allowed"));
	num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
	if (!value_in_range_p (opnd->shifter.amount, 0, num))
	  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
/* Main entrypoint for the operand constraint checking.

   Return 1 if operands of *INST meet the constraints applied by the operand
   codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
   not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
   adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
   with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts a
   non-NIL error kind when it is notified that an instruction does not pass
   the check).

   Un-determined operand qualifiers may get established during the process.  */

int
aarch64_match_operands_constraint (aarch64_inst *inst,
				   aarch64_operand_error *mismatch_detail)
{
  int i;

  DEBUG_TRACE ("enter");

  /* Check for cases where a source register needs to be the same as the
     destination register.  Do this before matching qualifiers since if
     an instruction has both invalid tying and invalid qualifiers,
     the error about qualifiers would suggest several alternative
     instructions that also have invalid tying.  */
  i = inst->opcode->tied_operand;
  if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
    {
      if (mismatch_detail)
	{
	  mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
	  mismatch_detail->index = i;
	  mismatch_detail->error = NULL;
	}
    }
  /* Match operands' qualifiers.
     *INST has already had qualifiers established for some, if not all, of
     its operands; we need to find out whether these established
     qualifiers match one of the qualifier sequences in
     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
     the corresponding qualifier in such a sequence.
     Only basic operand constraint checking is done here; the more thorough
     constraint checking will be carried out by
     operand_general_constraint_met_p, which has to be called after this in
     order to get all of the operands' qualifiers established.  */
  if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
    {
      DEBUG_TRACE ("FAIL on operand qualifier matching");
      if (mismatch_detail)
	{
	  /* Return an error type to indicate that it is the qualifier
	     matching failure; we don't care about which operand as there
	     is enough information in the opcode table to reproduce it.  */
	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
	  mismatch_detail->index = -1;
	  mismatch_detail->error = NULL;
	}
    }
  /* Match operands' constraints.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      enum aarch64_opnd type = inst->opcode->operands[i];
      if (type == AARCH64_OPND_NIL)
	break;
      if (inst->operands[i].skip)
	{
	  DEBUG_TRACE ("skip the incomplete operand %d", i);
	  continue;
	}
      if (operand_general_constraint_met_p (inst->operands, i, type,
					    inst->opcode, mismatch_detail) == 0)
	{
	  DEBUG_TRACE ("FAIL on operand %d", i);
	  return 0;
	}
    }

  DEBUG_TRACE ("PASS");
  return 1;
}
/* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
   Also updates the TYPE of each INST->OPERANDS with the corresponding
   value of OPCODE->OPERANDS.

   Note that some operand qualifiers may need to be manually cleared by
   the caller before it further calls aarch64_opcode_encode; by
   doing this, it helps the qualifier matching facilities work
   effectively.  */

const aarch64_opcode *
aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
{
  int i;
  const aarch64_opcode *old = inst->opcode;

  inst->opcode = opcode;

  /* Update the operand types.  */
  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    {
      inst->operands[i].type = opcode->operands[i];
      if (opcode->operands[i] == AARCH64_OPND_NIL)
	break;
    }

  DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);

  return old;
}
int
aarch64_operand_index (const enum aarch64_opnd *operands,
		       enum aarch64_opnd operand)
{
  int i;

  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
    if (operands[i] == operand)
      return i;
    else if (operands[i] == AARCH64_OPND_NIL)
      break;
  return -1;
}
/* R0...R30, followed by FOR31.  */
#define BANK(R, FOR31) \
  { R (0),  R (1),  R (2),  R (3),  R (4),  R (5),  R (6),  R (7),  \
    R (8),  R (9),  R (10), R (11), R (12), R (13), R (14), R (15), \
    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
    R (24), R (25), R (26), R (27), R (28), R (29), R (30), FOR31 }

/* [0][0]  32-bit integer regs with sp   Wn
   [0][1]  64-bit integer regs with sp   Xn  sf=1
   [1][0]  32-bit integer regs with #0   Wn
   [1][1]  64-bit integer regs with #0   Xn  sf=1  */
static const char *int_reg[2][2][32] = {
#define R32(X) "w" #X
#define R64(X) "x" #X
  { BANK (R32, "wsp"), BANK (R64, "sp") },
  { BANK (R32, "wzr"), BANK (R64, "xzr") }
};

/* Names of the SVE vector registers, first with .S suffixes,
   then with .D suffixes.  */

static const char *sve_reg[2][32] = {
#define ZS(X) "z" #X ".s"
#define ZD(X) "z" #X ".d"
  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
};
/* Return the integer register name.
   If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg.  */

static inline const char *
get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
{
  const int has_zr = sp_reg_p ? 0 : 1;
  const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
  return int_reg[has_zr][is_64][regno];
}

/* Like get_int_reg_name, but IS_64 is always 1.  */

static inline const char *
get_64bit_int_reg_name (int regno, int sp_reg_p)
{
  const int has_zr = sp_reg_p ? 0 : 1;
  return int_reg[has_zr][1][regno];
}
/* Get the name of the integer offset register in OPND, using the shift type
   to decide whether it's a word or doubleword.  */

static inline const char *
get_offset_int_reg_name (const aarch64_opnd_info *opnd)
{
  switch (opnd->shifter.kind)
    {
    case AARCH64_MOD_UXTW:
    case AARCH64_MOD_SXTW:
      return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);

    case AARCH64_MOD_LSL:
    case AARCH64_MOD_SXTX:
      return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
    }
}

/* Get the name of the SVE vector offset register in OPND, using the operand
   qualifier to decide whether the suffix should be .S or .D.  */

static inline const char *
get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
{
  assert (qualifier == AARCH64_OPND_QLF_S_S
	  || qualifier == AARCH64_OPND_QLF_S_D);
  return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
}
/* Types for expanding an encoded 8-bit value to a floating-point value.  */

/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
   (depending on the type of the instruction).  IMM8 will be expanded to a
   single-precision floating-point value (SIZE == 4) or a double-precision
   floating-point value (SIZE == 8).  A half-precision floating-point value
   (SIZE == 2) is expanded to a single-precision floating-point value.  The
   expanded value is returned.  */

static uint64_t
expand_fp_imm (int size, uint32_t imm8)
{
  uint64_t imm = 0;
  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;

  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */

  if (size == 8)
    {
      imm = (imm8_7 << (63-32))		  /* imm8<7>  */
	    | ((imm8_6 ^ 1) << (62-32))	  /* NOT(imm8<6>)  */
	    | (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
	    | (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
	    | (imm8_6_0 << (48-32));	  /* imm8<6>:imm8<5:0>  */
      imm <<= 32;
    }
  else if (size == 4 || size == 2)
    {
      imm = (imm8_7 << 31)		/* imm8<7>              */
	    | ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
	    | (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
	    | (imm8_6_0 << 19);		/* imm8<6>:imm8<5:0>    */
    }
  else
    /* An unsupported size.  */
    abort ();

  return imm;
}
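/* Worked example (added for clarity, not part of the original source):
   for single precision, imm8 = 0x70 gives imm8<7> = 0, imm8<6> = 1 and
   imm8<5:0> = 0x30, so the expansion above yields 0x3f800000, i.e. the
   encoding of 1.0f used by "FMOV <Sd>, #1.0".  */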
/* Produce the string representation of the register list operand *OPND
   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
   the register name that comes before the register number, such as "v".  */
static void
print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
		     const char *prefix)
{
  const int num_regs = opnd->reglist.num_regs;
  const int first_reg = opnd->reglist.first_regno;
  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
  char tb[8];	/* Temporary buffer.  */

  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
  assert (num_regs >= 1 && num_regs <= 4);

  /* Prepare the index if any.  */
  if (opnd->reglist.has_index)
    /* PR 21096: The %100 is to silence a warning about possible
       truncation.  */
    snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
  else
    tb[0] = '\0';

  /* The hyphenated form is preferred for disassembly if there are
     more than two registers in the list, and the register numbers
     are monotonically increasing in increments of one.  */
  if (num_regs > 2 && last_reg > first_reg)
    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
	      prefix, last_reg, qlf_name, tb);
  else
    {
      const int reg0 = first_reg;
      const int reg1 = (first_reg + 1) & 0x1f;
      const int reg2 = (first_reg + 2) & 0x1f;
      const int reg3 = (first_reg + 3) & 0x1f;

      switch (num_regs)
	{
	case 1:
	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
	  break;
	case 2:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
		    prefix, reg1, qlf_name, tb);
	  break;
	case 3:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, tb);
	  break;
	case 4:
	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
	  break;
	}
    }
}
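/* Example outputs (illustrative only, not part of the original source):
   a list of four consecutive registers starting at v0 prints as
   "{v0.4s-v3.4s}", while a list that wraps around the register file,
   e.g. three registers starting at v30, falls back to the comma form
   "{v30.8b, v31.8b, v0.8b}" because last_reg < first_reg.  */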
/* Print the register+immediate address in OPND to BUF, which has SIZE
   characters.  BASE is the name of the base register.  */
static void
print_immediate_offset_address (char *buf, size_t size,
				const aarch64_opnd_info *opnd,
				const char *base)
{
  if (opnd->addr.writeback)
    {
      if (opnd->addr.preind)
	{
	  if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
	    snprintf (buf, size, "[%s]!", base);
	  else
	    snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
	}
      else
	snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
    }
  else
    {
      if (opnd->shifter.operator_present)
	{
	  assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
	  snprintf (buf, size, "[%s, #%d, mul vl]",
		    base, opnd->addr.offset.imm);
	}
      else if (opnd->addr.offset.imm)
	snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
      else
	snprintf (buf, size, "[%s]", base);
    }
}
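/* Example outputs (illustrative, not from the original source):
   pre-indexed writeback prints as "[x1, #16]!", post-indexed writeback
   as "[x1], #16", a scaled SVE offset as "[x1, #3, mul vl]", and a zero
   offset simply as "[x1]".  */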
/* Produce the string representation of the register offset address operand
   *OPND in the buffer pointed by BUF of size SIZE.  BASE and OFFSET are
   the names of the base and offset registers.  */
static void
print_register_offset_address (char *buf, size_t size,
			       const aarch64_opnd_info *opnd,
			       const char *base, const char *offset)
{
  char tb[16];	/* Temporary buffer.  */
  bfd_boolean print_extend_p = TRUE;
  bfd_boolean print_amount_p = TRUE;
  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;

  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
				|| !opnd->shifter.amount_present))
    {
      /* Don't print the shift/extend amount when the amount is zero and
	 it is not the special case of an 8-bit load/store
	 instruction.  */
      print_amount_p = FALSE;
      /* Likewise, no need to print the shift operator LSL in such a
	 case.  */
      if (opnd->shifter.kind == AARCH64_MOD_LSL)
	print_extend_p = FALSE;
    }

  /* Prepare for the extend/shift.  */
  if (print_extend_p)
    {
      if (print_amount_p)
	snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
	  /* PR 21096: The %100 is to silence a warning about possible
	     truncation.  */
		  (opnd->shifter.amount % 100));
      else
	snprintf (tb, sizeof (tb), ", %s", shift_name);
    }
  else
    tb[0] = '\0';

  snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
}
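/* Example outputs (illustrative, not from the original source):
   "[x0, w1, uxtw #2]" when an extend operator and amount are present,
   and simply "[x0, x1]" when the operator is LSL with a zero amount.  */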
/* Generate the string representation of the operand OPNDS[IDX] for OPCODE
   in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
   PC, PCREL_P and ADDRESS are used to pass in and return information about
   the PC-relative address calculation, where the PC value is passed in
   PC.  If the operand is PC-relative, *PCREL_P (if PCREL_P is non-NULL)
   will return 1 and *ADDRESS (if ADDRESS is non-NULL) will return the
   calculated address; otherwise, *PCREL_P (if PCREL_P is non-NULL)
   returns 0.

   The function serves both the disassembler and the assembler diagnostics
   issuer, which is the reason why it lives in this file.  */

void
aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
		       const aarch64_opcode *opcode,
		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
		       bfd_vma *address, char** notes,
		       aarch64_feature_set features)
{
  unsigned int i, num_conds;
  const char *name = NULL;
  const aarch64_opnd_info *opnd = opnds + idx;
  enum aarch64_modifier_kind kind;
  uint64_t addr, enum_value;
3170 case AARCH64_OPND_Rd
:
3171 case AARCH64_OPND_Rn
:
3172 case AARCH64_OPND_Rm
:
3173 case AARCH64_OPND_Rt
:
3174 case AARCH64_OPND_Rt2
:
3175 case AARCH64_OPND_Rs
:
3176 case AARCH64_OPND_Ra
:
3177 case AARCH64_OPND_Rt_LS64
:
3178 case AARCH64_OPND_Rt_SYS
:
3179 case AARCH64_OPND_PAIRREG
:
3180 case AARCH64_OPND_SVE_Rm
:
3181 /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3182 the <ic_op>, therefore we use opnd->present to override the
3183 generic optional-ness information. */
3184 if (opnd
->type
== AARCH64_OPND_Rt_SYS
)
3189 /* Omit the operand, e.g. RET. */
3190 else if (optional_operand_p (opcode
, idx
)
3192 == get_optional_operand_default_value (opcode
)))
3194 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3195 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3196 snprintf (buf
, size
, "%s",
3197 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3200 case AARCH64_OPND_Rd_SP
:
3201 case AARCH64_OPND_Rn_SP
:
3202 case AARCH64_OPND_Rt_SP
:
3203 case AARCH64_OPND_SVE_Rn_SP
:
3204 case AARCH64_OPND_Rm_SP
:
3205 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3206 || opnd
->qualifier
== AARCH64_OPND_QLF_WSP
3207 || opnd
->qualifier
== AARCH64_OPND_QLF_X
3208 || opnd
->qualifier
== AARCH64_OPND_QLF_SP
);
3209 snprintf (buf
, size
, "%s",
3210 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 1));
3213 case AARCH64_OPND_Rm_EXT
:
3214 kind
= opnd
->shifter
.kind
;
3215 assert (idx
== 1 || idx
== 2);
3216 if ((aarch64_stack_pointer_p (opnds
)
3217 || (idx
== 2 && aarch64_stack_pointer_p (opnds
+ 1)))
3218 && ((opnd
->qualifier
== AARCH64_OPND_QLF_W
3219 && opnds
[0].qualifier
== AARCH64_OPND_QLF_W
3220 && kind
== AARCH64_MOD_UXTW
)
3221 || (opnd
->qualifier
== AARCH64_OPND_QLF_X
3222 && kind
== AARCH64_MOD_UXTX
)))
3224 /* 'LSL' is the preferred form in this case. */
3225 kind
= AARCH64_MOD_LSL
;
3226 if (opnd
->shifter
.amount
== 0)
3228 /* Shifter omitted. */
3229 snprintf (buf
, size
, "%s",
3230 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3234 if (opnd
->shifter
.amount
)
3235 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3236 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3237 aarch64_operand_modifiers
[kind
].name
,
3238 opnd
->shifter
.amount
);
3240 snprintf (buf
, size
, "%s, %s",
3241 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3242 aarch64_operand_modifiers
[kind
].name
);
3245 case AARCH64_OPND_Rm_SFT
:
3246 assert (opnd
->qualifier
== AARCH64_OPND_QLF_W
3247 || opnd
->qualifier
== AARCH64_OPND_QLF_X
);
3248 if (opnd
->shifter
.amount
== 0 && opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3249 snprintf (buf
, size
, "%s",
3250 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0));
3252 snprintf (buf
, size
, "%s, %s #%" PRIi64
,
3253 get_int_reg_name (opnd
->reg
.regno
, opnd
->qualifier
, 0),
3254 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3255 opnd
->shifter
.amount
);
3258 case AARCH64_OPND_Fd
:
3259 case AARCH64_OPND_Fn
:
3260 case AARCH64_OPND_Fm
:
3261 case AARCH64_OPND_Fa
:
3262 case AARCH64_OPND_Ft
:
3263 case AARCH64_OPND_Ft2
:
3264 case AARCH64_OPND_Sd
:
3265 case AARCH64_OPND_Sn
:
3266 case AARCH64_OPND_Sm
:
3267 case AARCH64_OPND_SVE_VZn
:
3268 case AARCH64_OPND_SVE_Vd
:
3269 case AARCH64_OPND_SVE_Vm
:
3270 case AARCH64_OPND_SVE_Vn
:
3271 snprintf (buf
, size
, "%s%d", aarch64_get_qualifier_name (opnd
->qualifier
),
3275 case AARCH64_OPND_Va
:
3276 case AARCH64_OPND_Vd
:
3277 case AARCH64_OPND_Vn
:
3278 case AARCH64_OPND_Vm
:
3279 snprintf (buf
, size
, "v%d.%s", opnd
->reg
.regno
,
3280 aarch64_get_qualifier_name (opnd
->qualifier
));
3283 case AARCH64_OPND_Ed
:
3284 case AARCH64_OPND_En
:
3285 case AARCH64_OPND_Em
:
3286 case AARCH64_OPND_Em16
:
3287 case AARCH64_OPND_SM3_IMM2
:
3288 snprintf (buf
, size
, "v%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3289 aarch64_get_qualifier_name (opnd
->qualifier
),
3290 opnd
->reglane
.index
);
3293 case AARCH64_OPND_VdD1
:
3294 case AARCH64_OPND_VnD1
:
3295 snprintf (buf
, size
, "v%d.d[1]", opnd
->reg
.regno
);
3298 case AARCH64_OPND_LVn
:
3299 case AARCH64_OPND_LVt
:
3300 case AARCH64_OPND_LVt_AL
:
3301 case AARCH64_OPND_LEt
:
3302 print_register_list (buf
, size
, opnd
, "v");
3305 case AARCH64_OPND_SVE_Pd
:
3306 case AARCH64_OPND_SVE_Pg3
:
3307 case AARCH64_OPND_SVE_Pg4_5
:
3308 case AARCH64_OPND_SVE_Pg4_10
:
3309 case AARCH64_OPND_SVE_Pg4_16
:
3310 case AARCH64_OPND_SVE_Pm
:
3311 case AARCH64_OPND_SVE_Pn
:
3312 case AARCH64_OPND_SVE_Pt
:
3313 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3314 snprintf (buf
, size
, "p%d", opnd
->reg
.regno
);
3315 else if (opnd
->qualifier
== AARCH64_OPND_QLF_P_Z
3316 || opnd
->qualifier
== AARCH64_OPND_QLF_P_M
)
3317 snprintf (buf
, size
, "p%d/%s", opnd
->reg
.regno
,
3318 aarch64_get_qualifier_name (opnd
->qualifier
));
3320 snprintf (buf
, size
, "p%d.%s", opnd
->reg
.regno
,
3321 aarch64_get_qualifier_name (opnd
->qualifier
));
3324 case AARCH64_OPND_SVE_Za_5
:
3325 case AARCH64_OPND_SVE_Za_16
:
3326 case AARCH64_OPND_SVE_Zd
:
3327 case AARCH64_OPND_SVE_Zm_5
:
3328 case AARCH64_OPND_SVE_Zm_16
:
3329 case AARCH64_OPND_SVE_Zn
:
3330 case AARCH64_OPND_SVE_Zt
:
3331 if (opnd
->qualifier
== AARCH64_OPND_QLF_NIL
)
3332 snprintf (buf
, size
, "z%d", opnd
->reg
.regno
);
3334 snprintf (buf
, size
, "z%d.%s", opnd
->reg
.regno
,
3335 aarch64_get_qualifier_name (opnd
->qualifier
));
3338 case AARCH64_OPND_SVE_ZnxN
:
3339 case AARCH64_OPND_SVE_ZtxN
:
3340 print_register_list (buf
, size
, opnd
, "z");
3343 case AARCH64_OPND_SVE_Zm3_INDEX
:
3344 case AARCH64_OPND_SVE_Zm3_22_INDEX
:
3345 case AARCH64_OPND_SVE_Zm3_11_INDEX
:
3346 case AARCH64_OPND_SVE_Zm4_11_INDEX
:
3347 case AARCH64_OPND_SVE_Zm4_INDEX
:
3348 case AARCH64_OPND_SVE_Zn_INDEX
:
3349 snprintf (buf
, size
, "z%d.%s[%" PRIi64
"]", opnd
->reglane
.regno
,
3350 aarch64_get_qualifier_name (opnd
->qualifier
),
3351 opnd
->reglane
.index
);
3354 case AARCH64_OPND_CRn
:
3355 case AARCH64_OPND_CRm
:
3356 snprintf (buf
, size
, "C%" PRIi64
, opnd
->imm
.value
);
3359 case AARCH64_OPND_IDX
:
3360 case AARCH64_OPND_MASK
:
3361 case AARCH64_OPND_IMM
:
3362 case AARCH64_OPND_IMM_2
:
3363 case AARCH64_OPND_WIDTH
:
3364 case AARCH64_OPND_UIMM3_OP1
:
3365 case AARCH64_OPND_UIMM3_OP2
:
3366 case AARCH64_OPND_BIT_NUM
:
3367 case AARCH64_OPND_IMM_VLSL
:
3368 case AARCH64_OPND_IMM_VLSR
:
3369 case AARCH64_OPND_SHLL_IMM
:
3370 case AARCH64_OPND_IMM0
:
3371 case AARCH64_OPND_IMMR
:
3372 case AARCH64_OPND_IMMS
:
3373 case AARCH64_OPND_UNDEFINED
:
3374 case AARCH64_OPND_FBITS
:
3375 case AARCH64_OPND_TME_UIMM16
:
3376 case AARCH64_OPND_SIMM5
:
3377 case AARCH64_OPND_SVE_SHLIMM_PRED
:
3378 case AARCH64_OPND_SVE_SHLIMM_UNPRED
:
3379 case AARCH64_OPND_SVE_SHLIMM_UNPRED_22
:
3380 case AARCH64_OPND_SVE_SHRIMM_PRED
:
3381 case AARCH64_OPND_SVE_SHRIMM_UNPRED
:
3382 case AARCH64_OPND_SVE_SHRIMM_UNPRED_22
:
3383 case AARCH64_OPND_SVE_SIMM5
:
3384 case AARCH64_OPND_SVE_SIMM5B
:
3385 case AARCH64_OPND_SVE_SIMM6
:
3386 case AARCH64_OPND_SVE_SIMM8
:
3387 case AARCH64_OPND_SVE_UIMM3
:
3388 case AARCH64_OPND_SVE_UIMM7
:
3389 case AARCH64_OPND_SVE_UIMM8
:
3390 case AARCH64_OPND_SVE_UIMM8_53
:
3391 case AARCH64_OPND_IMM_ROT1
:
3392 case AARCH64_OPND_IMM_ROT2
:
3393 case AARCH64_OPND_IMM_ROT3
:
3394 case AARCH64_OPND_SVE_IMM_ROT1
:
3395 case AARCH64_OPND_SVE_IMM_ROT2
:
3396 case AARCH64_OPND_SVE_IMM_ROT3
:
3397 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3400 case AARCH64_OPND_SVE_I1_HALF_ONE
:
3401 case AARCH64_OPND_SVE_I1_HALF_TWO
:
3402 case AARCH64_OPND_SVE_I1_ZERO_ONE
:
3405 c
.i
= opnd
->imm
.value
;
3406 snprintf (buf
, size
, "#%.1f", c
.f
);
3410 case AARCH64_OPND_SVE_PATTERN
:
3411 if (optional_operand_p (opcode
, idx
)
3412 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3414 enum_value
= opnd
->imm
.value
;
3415 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3416 if (aarch64_sve_pattern_array
[enum_value
])
3417 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[enum_value
]);
3419 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3422 case AARCH64_OPND_SVE_PATTERN_SCALED
:
3423 if (optional_operand_p (opcode
, idx
)
3424 && !opnd
->shifter
.operator_present
3425 && opnd
->imm
.value
== get_optional_operand_default_value (opcode
))
3427 enum_value
= opnd
->imm
.value
;
3428 assert (enum_value
< ARRAY_SIZE (aarch64_sve_pattern_array
));
3429 if (aarch64_sve_pattern_array
[opnd
->imm
.value
])
3430 snprintf (buf
, size
, "%s", aarch64_sve_pattern_array
[opnd
->imm
.value
]);
3432 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3433 if (opnd
->shifter
.operator_present
)
3435 size_t len
= strlen (buf
);
3436 snprintf (buf
+ len
, size
- len
, ", %s #%" PRIi64
,
3437 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3438 opnd
->shifter
.amount
);
3442 case AARCH64_OPND_SVE_PRFOP
:
3443 enum_value
= opnd
->imm
.value
;
3444 assert (enum_value
< ARRAY_SIZE (aarch64_sve_prfop_array
));
3445 if (aarch64_sve_prfop_array
[enum_value
])
3446 snprintf (buf
, size
, "%s", aarch64_sve_prfop_array
[enum_value
]);
3448 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3451 case AARCH64_OPND_IMM_MOV
:
3452 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3454 case 4: /* e.g. MOV Wd, #<imm32>. */
3456 int imm32
= opnd
->imm
.value
;
3457 snprintf (buf
, size
, "#0x%-20x\t// #%d", imm32
, imm32
);
3460 case 8: /* e.g. MOV Xd, #<imm64>. */
3461 snprintf (buf
, size
, "#0x%-20" PRIx64
"\t// #%" PRIi64
,
3462 opnd
->imm
.value
, opnd
->imm
.value
);
3464 default: assert (0);
3468 case AARCH64_OPND_FPIMM0
:
3469 snprintf (buf
, size
, "#0.0");
3472 case AARCH64_OPND_LIMM
:
3473 case AARCH64_OPND_AIMM
:
3474 case AARCH64_OPND_HALF
:
3475 case AARCH64_OPND_SVE_INV_LIMM
:
3476 case AARCH64_OPND_SVE_LIMM
:
3477 case AARCH64_OPND_SVE_LIMM_MOV
:
3478 if (opnd
->shifter
.amount
)
3479 snprintf (buf
, size
, "#0x%" PRIx64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3480 opnd
->shifter
.amount
);
3482 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3485 case AARCH64_OPND_SIMD_IMM
:
3486 case AARCH64_OPND_SIMD_IMM_SFT
:
3487 if ((! opnd
->shifter
.amount
&& opnd
->shifter
.kind
== AARCH64_MOD_LSL
)
3488 || opnd
->shifter
.kind
== AARCH64_MOD_NONE
)
3489 snprintf (buf
, size
, "#0x%" PRIx64
, opnd
->imm
.value
);
3491 snprintf (buf
, size
, "#0x%" PRIx64
", %s #%" PRIi64
, opnd
->imm
.value
,
3492 aarch64_operand_modifiers
[opnd
->shifter
.kind
].name
,
3493 opnd
->shifter
.amount
);
3496 case AARCH64_OPND_SVE_AIMM
:
3497 case AARCH64_OPND_SVE_ASIMM
:
3498 if (opnd
->shifter
.amount
)
3499 snprintf (buf
, size
, "#%" PRIi64
", lsl #%" PRIi64
, opnd
->imm
.value
,
3500 opnd
->shifter
.amount
);
3502 snprintf (buf
, size
, "#%" PRIi64
, opnd
->imm
.value
);
3505 case AARCH64_OPND_FPIMM
:
3506 case AARCH64_OPND_SIMD_FPIMM
:
3507 case AARCH64_OPND_SVE_FPIMM8
:
3508 switch (aarch64_get_qualifier_esize (opnds
[0].qualifier
))
3510 case 2: /* e.g. FMOV <Hd>, #<imm>. */
3513 c
.i
= expand_fp_imm (2, opnd
->imm
.value
);
3514 snprintf (buf
, size
, "#%.18e", c
.f
);
3517 case 4: /* e.g. FMOV <Vd>.4S, #<imm>. */
3520 c
.i
= expand_fp_imm (4, opnd
->imm
.value
);
3521 snprintf (buf
, size
, "#%.18e", c
.f
);
3524 case 8: /* e.g. FMOV <Sd>, #<imm>. */
3527 c
.i
= expand_fp_imm (8, opnd
->imm
.value
);
3528 snprintf (buf
, size
, "#%.18e", c
.d
);
3531 default: assert (0);
3535 case AARCH64_OPND_CCMP_IMM
:
3536 case AARCH64_OPND_NZCV
:
3537 case AARCH64_OPND_EXCEPTION
:
3538 case AARCH64_OPND_UIMM4
:
3539 case AARCH64_OPND_UIMM4_ADDG
:
3540 case AARCH64_OPND_UIMM7
:
3541 case AARCH64_OPND_UIMM10
:
3542 if (optional_operand_p (opcode
, idx
) == TRUE
3543 && (opnd
->imm
.value
==
3544 (int64_t) get_optional_operand_default_value (opcode
)))
3545 /* Omit the operand, e.g. DCPS1. */
3547 snprintf (buf
, size
, "#0x%x", (unsigned int)opnd
->imm
.value
);
3550 case AARCH64_OPND_COND
:
3551 case AARCH64_OPND_COND1
:
3552 snprintf (buf
, size
, "%s", opnd
->cond
->names
[0]);
3553 num_conds
= ARRAY_SIZE (opnd
->cond
->names
);
3554 for (i
= 1; i
< num_conds
&& opnd
->cond
->names
[i
]; ++i
)
3556 size_t len
= strlen (buf
);
3558 snprintf (buf
+ len
, size
- len
, " // %s = %s",
3559 opnd
->cond
->names
[0], opnd
->cond
->names
[i
]);
3561 snprintf (buf
+ len
, size
- len
, ", %s",
3562 opnd
->cond
->names
[i
]);
3566 case AARCH64_OPND_ADDR_ADRP
:
3567 addr
= ((pc
+ AARCH64_PCREL_OFFSET
) & ~(uint64_t)0xfff)
3573 /* This is not necessary during the disassembling, as print_address_func
3574 in the disassemble_info will take care of the printing. But some
3575 other callers may be still interested in getting the string in *STR,
3576 so here we do snprintf regardless. */
3577 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3580 case AARCH64_OPND_ADDR_PCREL14
:
3581 case AARCH64_OPND_ADDR_PCREL19
:
3582 case AARCH64_OPND_ADDR_PCREL21
:
3583 case AARCH64_OPND_ADDR_PCREL26
:
3584 addr
= pc
+ AARCH64_PCREL_OFFSET
+ opnd
->imm
.value
;
3589 /* This is not necessary during the disassembling, as print_address_func
3590 in the disassemble_info will take care of the printing. But some
3591 other callers may be still interested in getting the string in *STR,
3592 so here we do snprintf regardless. */
3593 snprintf (buf
, size
, "#0x%" PRIx64
, addr
);
3596 case AARCH64_OPND_ADDR_SIMPLE
:
3597 case AARCH64_OPND_SIMD_ADDR_SIMPLE
:
3598 case AARCH64_OPND_SIMD_ADDR_POST
:
3599 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3600 if (opnd
->type
== AARCH64_OPND_SIMD_ADDR_POST
)
3602 if (opnd
->addr
.offset
.is_reg
)
3603 snprintf (buf
, size
, "[%s], x%d", name
, opnd
->addr
.offset
.regno
);
3605 snprintf (buf
, size
, "[%s], #%d", name
, opnd
->addr
.offset
.imm
);
3608 snprintf (buf
, size
, "[%s]", name
);
3611 case AARCH64_OPND_ADDR_REGOFF
:
3612 case AARCH64_OPND_SVE_ADDR_R
:
3613 case AARCH64_OPND_SVE_ADDR_RR
:
3614 case AARCH64_OPND_SVE_ADDR_RR_LSL1
:
3615 case AARCH64_OPND_SVE_ADDR_RR_LSL2
:
3616 case AARCH64_OPND_SVE_ADDR_RR_LSL3
:
3617 case AARCH64_OPND_SVE_ADDR_RX
:
3618 case AARCH64_OPND_SVE_ADDR_RX_LSL1
:
3619 case AARCH64_OPND_SVE_ADDR_RX_LSL2
:
3620 case AARCH64_OPND_SVE_ADDR_RX_LSL3
:
3621 print_register_offset_address
3622 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3623 get_offset_int_reg_name (opnd
));
3626 case AARCH64_OPND_SVE_ADDR_ZX
:
3627 print_register_offset_address
3629 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
3630 get_64bit_int_reg_name (opnd
->addr
.offset
.regno
, 0));
3633 case AARCH64_OPND_SVE_ADDR_RZ
:
3634 case AARCH64_OPND_SVE_ADDR_RZ_LSL1
:
3635 case AARCH64_OPND_SVE_ADDR_RZ_LSL2
:
3636 case AARCH64_OPND_SVE_ADDR_RZ_LSL3
:
3637 case AARCH64_OPND_SVE_ADDR_RZ_XTW_14
:
3638 case AARCH64_OPND_SVE_ADDR_RZ_XTW_22
:
3639 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14
:
3640 case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22
:
3641 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14
:
3642 case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22
:
3643 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14
:
3644 case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22
:
3645 print_register_offset_address
3646 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1),
3647 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3650 case AARCH64_OPND_ADDR_SIMM7
:
3651 case AARCH64_OPND_ADDR_SIMM9
:
3652 case AARCH64_OPND_ADDR_SIMM9_2
:
3653 case AARCH64_OPND_ADDR_SIMM10
:
3654 case AARCH64_OPND_ADDR_SIMM11
:
3655 case AARCH64_OPND_ADDR_SIMM13
:
3656 case AARCH64_OPND_ADDR_OFFSET
:
3657 case AARCH64_OPND_SVE_ADDR_RI_S4x16
:
3658 case AARCH64_OPND_SVE_ADDR_RI_S4x32
:
3659 case AARCH64_OPND_SVE_ADDR_RI_S4xVL
:
3660 case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL
:
3661 case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL
:
3662 case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL
:
3663 case AARCH64_OPND_SVE_ADDR_RI_S6xVL
:
3664 case AARCH64_OPND_SVE_ADDR_RI_S9xVL
:
3665 case AARCH64_OPND_SVE_ADDR_RI_U6
:
3666 case AARCH64_OPND_SVE_ADDR_RI_U6x2
:
3667 case AARCH64_OPND_SVE_ADDR_RI_U6x4
:
3668 case AARCH64_OPND_SVE_ADDR_RI_U6x8
:
3669 print_immediate_offset_address
3670 (buf
, size
, opnd
, get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1));
3673 case AARCH64_OPND_SVE_ADDR_ZI_U5
:
3674 case AARCH64_OPND_SVE_ADDR_ZI_U5x2
:
3675 case AARCH64_OPND_SVE_ADDR_ZI_U5x4
:
3676 case AARCH64_OPND_SVE_ADDR_ZI_U5x8
:
3677 print_immediate_offset_address
3679 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
));
3682 case AARCH64_OPND_SVE_ADDR_ZZ_LSL
:
3683 case AARCH64_OPND_SVE_ADDR_ZZ_SXTW
:
3684 case AARCH64_OPND_SVE_ADDR_ZZ_UXTW
:
3685 print_register_offset_address
3687 get_addr_sve_reg_name (opnd
->addr
.base_regno
, opnd
->qualifier
),
3688 get_addr_sve_reg_name (opnd
->addr
.offset
.regno
, opnd
->qualifier
));
3691 case AARCH64_OPND_ADDR_UIMM12
:
3692 name
= get_64bit_int_reg_name (opnd
->addr
.base_regno
, 1);
3693 if (opnd
->addr
.offset
.imm
)
3694 snprintf (buf
, size
, "[%s, #%d]", name
, opnd
->addr
.offset
.imm
);
3696 snprintf (buf
, size
, "[%s]", name
);
3699 case AARCH64_OPND_SYSREG
:
3700 for (i
= 0; aarch64_sys_regs
[i
].name
; ++i
)
3702 const aarch64_sys_reg
*sr
= aarch64_sys_regs
+ i
;
3704 bfd_boolean exact_match
3705 = (!(sr
->flags
& (F_REG_READ
| F_REG_WRITE
))
3706 || (sr
->flags
& opnd
->sysreg
.flags
) == opnd
->sysreg
.flags
)
3707 && AARCH64_CPU_HAS_FEATURE (features
, sr
->features
);
3709 /* Try and find an exact match, But if that fails, return the first
3710 partial match that was found. */
3711 if (aarch64_sys_regs
[i
].value
== opnd
->sysreg
.value
3712 && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs
[i
].flags
)
3713 && (name
== NULL
|| exact_match
))
3715 name
= aarch64_sys_regs
[i
].name
;
3723 /* If we didn't match exactly, that means the presense of a flag
3724 indicates what we didn't want for this instruction. e.g. If
3725 F_REG_READ is there, that means we were looking for a write
3726 register. See aarch64_ext_sysreg. */
3727 if (aarch64_sys_regs
[i
].flags
& F_REG_WRITE
)
3728 *notes
= _("reading from a write-only register");
3729 else if (aarch64_sys_regs
[i
].flags
& F_REG_READ
)
3730 *notes
= _("writing to a read-only register");
3735 snprintf (buf
, size
, "%s", name
);
3738 /* Implementation defined system register. */
3739 unsigned int value
= opnd
->sysreg
.value
;
3740 snprintf (buf
, size
, "s%u_%u_c%u_c%u_%u", (value
>> 14) & 0x3,
3741 (value
>> 11) & 0x7, (value
>> 7) & 0xf, (value
>> 3) & 0xf,
3746 case AARCH64_OPND_PSTATEFIELD
:
3747 for (i
= 0; aarch64_pstatefields
[i
].name
; ++i
)
3748 if (aarch64_pstatefields
[i
].value
== opnd
->pstatefield
)
3750 assert (aarch64_pstatefields
[i
].name
);
3751 snprintf (buf
, size
, "%s", aarch64_pstatefields
[i
].name
);
3754 case AARCH64_OPND_SYSREG_AT
:
3755 case AARCH64_OPND_SYSREG_DC
:
3756 case AARCH64_OPND_SYSREG_IC
:
3757 case AARCH64_OPND_SYSREG_TLBI
:
3758 case AARCH64_OPND_SYSREG_SR
:
3759 snprintf (buf
, size
, "%s", opnd
->sysins_op
->name
);
3762 case AARCH64_OPND_BARRIER
:
3763 case AARCH64_OPND_BARRIER_DSB_NXS
:
3764 snprintf (buf
, size
, "%s", opnd
->barrier
->name
);
3767 case AARCH64_OPND_BARRIER_ISB
:
3768 /* Operand can be omitted, e.g. in DCPS1. */
3769 if (! optional_operand_p (opcode
, idx
)
3770 || (opnd
->barrier
->value
3771 != get_optional_operand_default_value (opcode
)))
3772 snprintf (buf
, size
, "#0x%x", opnd
->barrier
->value
);
3775 case AARCH64_OPND_PRFOP
:
3776 if (opnd
->prfop
->name
!= NULL
)
3777 snprintf (buf
, size
, "%s", opnd
->prfop
->name
);
3779 snprintf (buf
, size
, "#0x%02x", opnd
->prfop
->value
);
3782 case AARCH64_OPND_BARRIER_PSB
:
3783 snprintf (buf
, size
, "csync");
3786 case AARCH64_OPND_BTI_TARGET
:
3787 if ((HINT_FLAG (opnd
->hint_option
->value
) & HINT_OPD_F_NOPRINT
) == 0)
3788 snprintf (buf
, size
, "%s", opnd
->hint_option
->name
);
3791 case AARCH64_OPND_CSRE_CSR
:
3792 snprintf (buf
, size
, "pdec");
#define CPENC(op0,op1,crn,crm,op2) \
  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
  /* for 3.9.3 Instructions for Accessing Special Purpose Registers.  */
#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
  /* for 3.9.10 System Instructions.  */
#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))

#define SYSREG(name, encoding, flags, features) \
  { name, encoding, flags, features }

#define SR_CORE(n,e,f) SYSREG (n,e,f,0)

#define SR_FEAT(n,e,f,feat) \
  SYSREG ((n), (e), (f) | F_ARCHEXT, AARCH64_FEATURE_##feat)

#define SR_FEAT2(n,e,f,fe1,fe2) \
  SYSREG ((n), (e), (f) | F_ARCHEXT, \
	  AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)

#define SR_RNG(n,e,f)	 SR_FEAT2(n,e,f,RNG,V8_5)
#define SR_V8_1_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_1)
#define SR_V8_4_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_4)

#define SR_V8_A(n,e,f)	  SR_FEAT (n,e,f,V8_A)
#define SR_V8_R(n,e,f)	  SR_FEAT (n,e,f,V8_R)
#define SR_V8_1(n,e,f)	  SR_FEAT (n,e,f,V8_1)
#define SR_V8_2(n,e,f)	  SR_FEAT (n,e,f,V8_2)
#define SR_V8_3(n,e,f)	  SR_FEAT (n,e,f,V8_3)
#define SR_V8_4(n,e,f)	  SR_FEAT (n,e,f,V8_4)
#define SR_PAN(n,e,f)	  SR_FEAT (n,e,f,PAN)
#define SR_RAS(n,e,f)	  SR_FEAT (n,e,f,RAS)
#define SR_SSBS(n,e,f)	  SR_FEAT (n,e,f,SSBS)
#define SR_SVE(n,e,f)	  SR_FEAT (n,e,f,SVE)
#define SR_ID_PFR2(n,e,f) SR_FEAT (n,e,f,ID_PFR2)
#define SR_PROFILE(n,e,f) SR_FEAT (n,e,f,PROFILE)
#define SR_MEMTAG(n,e,f)  SR_FEAT (n,e,f,MEMTAG)
#define SR_SCXTNUM(n,e,f) SR_FEAT (n,e,f,SCXTNUM)

#define SR_EXPAND_ELx(f,x) \

#define SR_EXPAND_EL12(f) \
  SR_EXPAND_ELx (f,1) \

/* TODO: there is one more issue that needs to be resolved:
   1. handle cpu-implementation-defined system registers.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these is set then the register is
   read-write.  */
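/* Illustrative note (added, not part of the original source): CPENC packs
   the op0:op1:CRn:CRm:op2 fields of an MRS/MSR encoding into a single
   value; for example the "tpidr_el0" entry below uses
   CPENC (3,3,C13,C0,2), i.e. op0=3, op1=3, CRn=13, CRm=0, op2=2.  */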
3882 const aarch64_sys_reg aarch64_sys_regs
[] =
3884 SR_CORE ("spsr_el1", CPEN_ (0,C0
,0), 0), /* = spsr_svc. */
3885 SR_V8_1 ("spsr_el12", CPEN_ (5,C0
,0), 0),
3886 SR_CORE ("elr_el1", CPEN_ (0,C0
,1), 0),
3887 SR_V8_1 ("elr_el12", CPEN_ (5,C0
,1), 0),
3888 SR_CORE ("sp_el0", CPEN_ (0,C1
,0), 0),
3889 SR_CORE ("spsel", CPEN_ (0,C2
,0), 0),
3890 SR_CORE ("daif", CPEN_ (3,C2
,1), 0),
3891 SR_CORE ("currentel", CPEN_ (0,C2
,2), F_REG_READ
),
3892 SR_PAN ("pan", CPEN_ (0,C2
,3), 0),
3893 SR_V8_2 ("uao", CPEN_ (0,C2
,4), 0),
3894 SR_CORE ("nzcv", CPEN_ (3,C2
,0), 0),
3895 SR_SSBS ("ssbs", CPEN_ (3,C2
,6), 0),
3896 SR_CORE ("fpcr", CPEN_ (3,C4
,0), 0),
3897 SR_CORE ("fpsr", CPEN_ (3,C4
,1), 0),
3898 SR_CORE ("dspsr_el0", CPEN_ (3,C5
,0), 0),
3899 SR_CORE ("dlr_el0", CPEN_ (3,C5
,1), 0),
3900 SR_CORE ("spsr_el2", CPEN_ (4,C0
,0), 0), /* = spsr_hyp. */
3901 SR_CORE ("elr_el2", CPEN_ (4,C0
,1), 0),
3902 SR_CORE ("sp_el1", CPEN_ (4,C1
,0), 0),
3903 SR_CORE ("spsr_irq", CPEN_ (4,C3
,0), 0),
3904 SR_CORE ("spsr_abt", CPEN_ (4,C3
,1), 0),
3905 SR_CORE ("spsr_und", CPEN_ (4,C3
,2), 0),
3906 SR_CORE ("spsr_fiq", CPEN_ (4,C3
,3), 0),
3907 SR_CORE ("spsr_el3", CPEN_ (6,C0
,0), 0),
3908 SR_CORE ("elr_el3", CPEN_ (6,C0
,1), 0),
3909 SR_CORE ("sp_el2", CPEN_ (6,C1
,0), 0),
3910 SR_CORE ("spsr_svc", CPEN_ (0,C0
,0), F_DEPRECATED
), /* = spsr_el1. */
3911 SR_CORE ("spsr_hyp", CPEN_ (4,C0
,0), F_DEPRECATED
), /* = spsr_el2. */
3912 SR_CORE ("midr_el1", CPENC (3,0,C0
,C0
,0), F_REG_READ
),
3913 SR_CORE ("ctr_el0", CPENC (3,3,C0
,C0
,1), F_REG_READ
),
3914 SR_CORE ("mpidr_el1", CPENC (3,0,C0
,C0
,5), F_REG_READ
),
3915 SR_CORE ("revidr_el1", CPENC (3,0,C0
,C0
,6), F_REG_READ
),
3916 SR_CORE ("aidr_el1", CPENC (3,1,C0
,C0
,7), F_REG_READ
),
3917 SR_CORE ("dczid_el0", CPENC (3,3,C0
,C0
,7), F_REG_READ
),
3918 SR_CORE ("id_dfr0_el1", CPENC (3,0,C0
,C1
,2), F_REG_READ
),
3919 SR_CORE ("id_pfr0_el1", CPENC (3,0,C0
,C1
,0), F_REG_READ
),
3920 SR_CORE ("id_pfr1_el1", CPENC (3,0,C0
,C1
,1), F_REG_READ
),
3921 SR_ID_PFR2 ("id_pfr2_el1", CPENC (3,0,C0
,C3
,4), F_REG_READ
),
3922 SR_CORE ("id_afr0_el1", CPENC (3,0,C0
,C1
,3), F_REG_READ
),
3923 SR_CORE ("id_mmfr0_el1", CPENC (3,0,C0
,C1
,4), F_REG_READ
),
3924 SR_CORE ("id_mmfr1_el1", CPENC (3,0,C0
,C1
,5), F_REG_READ
),
3925 SR_CORE ("id_mmfr2_el1", CPENC (3,0,C0
,C1
,6), F_REG_READ
),
3926 SR_CORE ("id_mmfr3_el1", CPENC (3,0,C0
,C1
,7), F_REG_READ
),
3927 SR_CORE ("id_mmfr4_el1", CPENC (3,0,C0
,C2
,6), F_REG_READ
),
3928 SR_CORE ("id_isar0_el1", CPENC (3,0,C0
,C2
,0), F_REG_READ
),
3929 SR_CORE ("id_isar1_el1", CPENC (3,0,C0
,C2
,1), F_REG_READ
),
3930 SR_CORE ("id_isar2_el1", CPENC (3,0,C0
,C2
,2), F_REG_READ
),
3931 SR_CORE ("id_isar3_el1", CPENC (3,0,C0
,C2
,3), F_REG_READ
),
3932 SR_CORE ("id_isar4_el1", CPENC (3,0,C0
,C2
,4), F_REG_READ
),
3933 SR_CORE ("id_isar5_el1", CPENC (3,0,C0
,C2
,5), F_REG_READ
),
3934 SR_CORE ("mvfr0_el1", CPENC (3,0,C0
,C3
,0), F_REG_READ
),
3935 SR_CORE ("mvfr1_el1", CPENC (3,0,C0
,C3
,1), F_REG_READ
),
3936 SR_CORE ("mvfr2_el1", CPENC (3,0,C0
,C3
,2), F_REG_READ
),
3937 SR_CORE ("ccsidr_el1", CPENC (3,1,C0
,C0
,0), F_REG_READ
),
3938 SR_CORE ("id_aa64pfr0_el1", CPENC (3,0,C0
,C4
,0), F_REG_READ
),
3939 SR_CORE ("id_aa64pfr1_el1", CPENC (3,0,C0
,C4
,1), F_REG_READ
),
3940 SR_CORE ("id_aa64dfr0_el1", CPENC (3,0,C0
,C5
,0), F_REG_READ
),
3941 SR_CORE ("id_aa64dfr1_el1", CPENC (3,0,C0
,C5
,1), F_REG_READ
),
3942 SR_CORE ("id_aa64isar0_el1", CPENC (3,0,C0
,C6
,0), F_REG_READ
),
3943 SR_CORE ("id_aa64isar1_el1", CPENC (3,0,C0
,C6
,1), F_REG_READ
),
3944 SR_CORE ("id_aa64mmfr0_el1", CPENC (3,0,C0
,C7
,0), F_REG_READ
),
3945 SR_CORE ("id_aa64mmfr1_el1", CPENC (3,0,C0
,C7
,1), F_REG_READ
),
3946 SR_V8_2 ("id_aa64mmfr2_el1", CPENC (3,0,C0
,C7
,2), F_REG_READ
),
3947 SR_CORE ("id_aa64afr0_el1", CPENC (3,0,C0
,C5
,4), F_REG_READ
),
3948 SR_CORE ("id_aa64afr1_el1", CPENC (3,0,C0
,C5
,5), F_REG_READ
),
3949 SR_SVE ("id_aa64zfr0_el1", CPENC (3,0,C0
,C4
,4), F_REG_READ
),
3950 SR_CORE ("clidr_el1", CPENC (3,1,C0
,C0
,1), F_REG_READ
),
3951 SR_CORE ("csselr_el1", CPENC (3,2,C0
,C0
,0), 0),
3952 SR_CORE ("vpidr_el2", CPENC (3,4,C0
,C0
,0), 0),
3953 SR_CORE ("vmpidr_el2", CPENC (3,4,C0
,C0
,5), 0),
3954 SR_CORE ("sctlr_el1", CPENC (3,0,C1
,C0
,0), 0),
3955 SR_CORE ("sctlr_el2", CPENC (3,4,C1
,C0
,0), 0),
3956 SR_CORE ("sctlr_el3", CPENC (3,6,C1
,C0
,0), 0),
3957 SR_V8_1 ("sctlr_el12", CPENC (3,5,C1
,C0
,0), 0),
3958 SR_CORE ("actlr_el1", CPENC (3,0,C1
,C0
,1), 0),
3959 SR_CORE ("actlr_el2", CPENC (3,4,C1
,C0
,1), 0),
3960 SR_CORE ("actlr_el3", CPENC (3,6,C1
,C0
,1), 0),
3961 SR_CORE ("cpacr_el1", CPENC (3,0,C1
,C0
,2), 0),
3962 SR_V8_1 ("cpacr_el12", CPENC (3,5,C1
,C0
,2), 0),
3963 SR_CORE ("cptr_el2", CPENC (3,4,C1
,C1
,2), 0),
3964 SR_CORE ("cptr_el3", CPENC (3,6,C1
,C1
,2), 0),
3965 SR_CORE ("scr_el3", CPENC (3,6,C1
,C1
,0), 0),
3966 SR_CORE ("hcr_el2", CPENC (3,4,C1
,C1
,0), 0),
3967 SR_CORE ("mdcr_el2", CPENC (3,4,C1
,C1
,1), 0),
3968 SR_CORE ("mdcr_el3", CPENC (3,6,C1
,C3
,1), 0),
3969 SR_CORE ("hstr_el2", CPENC (3,4,C1
,C1
,3), 0),
3970 SR_CORE ("hacr_el2", CPENC (3,4,C1
,C1
,7), 0),
3971 SR_SVE ("zcr_el1", CPENC (3,0,C1
,C2
,0), 0),
3972 SR_SVE ("zcr_el12", CPENC (3,5,C1
,C2
,0), 0),
3973 SR_SVE ("zcr_el2", CPENC (3,4,C1
,C2
,0), 0),
3974 SR_SVE ("zcr_el3", CPENC (3,6,C1
,C2
,0), 0),
3975 SR_SVE ("zidr_el1", CPENC (3,0,C0
,C0
,7), 0),
3976 SR_CORE ("ttbr0_el1", CPENC (3,0,C2
,C0
,0), 0),
3977 SR_CORE ("ttbr1_el1", CPENC (3,0,C2
,C0
,1), 0),
3978 SR_V8_A ("ttbr0_el2", CPENC (3,4,C2
,C0
,0), 0),
3979 SR_V8_1_A ("ttbr1_el2", CPENC (3,4,C2
,C0
,1), 0),
3980 SR_CORE ("ttbr0_el3", CPENC (3,6,C2
,C0
,0), 0),
3981 SR_V8_1 ("ttbr0_el12", CPENC (3,5,C2
,C0
,0), 0),
3982 SR_V8_1 ("ttbr1_el12", CPENC (3,5,C2
,C0
,1), 0),
3983 SR_V8_A ("vttbr_el2", CPENC (3,4,C2
,C1
,0), 0),
3984 SR_CORE ("tcr_el1", CPENC (3,0,C2
,C0
,2), 0),
3985 SR_CORE ("tcr_el2", CPENC (3,4,C2
,C0
,2), 0),
3986 SR_CORE ("tcr_el3", CPENC (3,6,C2
,C0
,2), 0),
3987 SR_V8_1 ("tcr_el12", CPENC (3,5,C2
,C0
,2), 0),
3988 SR_CORE ("vtcr_el2", CPENC (3,4,C2
,C1
,2), 0),
3989 SR_V8_3 ("apiakeylo_el1", CPENC (3,0,C2
,C1
,0), 0),
3990 SR_V8_3 ("apiakeyhi_el1", CPENC (3,0,C2
,C1
,1), 0),
3991 SR_V8_3 ("apibkeylo_el1", CPENC (3,0,C2
,C1
,2), 0),
3992 SR_V8_3 ("apibkeyhi_el1", CPENC (3,0,C2
,C1
,3), 0),
3993 SR_V8_3 ("apdakeylo_el1", CPENC (3,0,C2
,C2
,0), 0),
3994 SR_V8_3 ("apdakeyhi_el1", CPENC (3,0,C2
,C2
,1), 0),
3995 SR_V8_3 ("apdbkeylo_el1", CPENC (3,0,C2
,C2
,2), 0),
3996 SR_V8_3 ("apdbkeyhi_el1", CPENC (3,0,C2
,C2
,3), 0),
3997 SR_V8_3 ("apgakeylo_el1", CPENC (3,0,C2
,C3
,0), 0),
3998 SR_V8_3 ("apgakeyhi_el1", CPENC (3,0,C2
,C3
,1), 0),
3999 SR_CORE ("afsr0_el1", CPENC (3,0,C5
,C1
,0), 0),
4000 SR_CORE ("afsr1_el1", CPENC (3,0,C5
,C1
,1), 0),
4001 SR_CORE ("afsr0_el2", CPENC (3,4,C5
,C1
,0), 0),
4002 SR_CORE ("afsr1_el2", CPENC (3,4,C5
,C1
,1), 0),
4003 SR_CORE ("afsr0_el3", CPENC (3,6,C5
,C1
,0), 0),
4004 SR_V8_1 ("afsr0_el12", CPENC (3,5,C5
,C1
,0), 0),
4005 SR_CORE ("afsr1_el3", CPENC (3,6,C5
,C1
,1), 0),
4006 SR_V8_1 ("afsr1_el12", CPENC (3,5,C5
,C1
,1), 0),
4007 SR_CORE ("esr_el1", CPENC (3,0,C5
,C2
,0), 0),
4008 SR_CORE ("esr_el2", CPENC (3,4,C5
,C2
,0), 0),
4009 SR_CORE ("esr_el3", CPENC (3,6,C5
,C2
,0), 0),
4010 SR_V8_1 ("esr_el12", CPENC (3,5,C5
,C2
,0), 0),
4011 SR_RAS ("vsesr_el2", CPENC (3,4,C5
,C2
,3), 0),
4012 SR_CORE ("fpexc32_el2", CPENC (3,4,C5
,C3
,0), 0),
4013 SR_RAS ("erridr_el1", CPENC (3,0,C5
,C3
,0), F_REG_READ
),
4014 SR_RAS ("errselr_el1", CPENC (3,0,C5
,C3
,1), 0),
4015 SR_RAS ("erxfr_el1", CPENC (3,0,C5
,C4
,0), F_REG_READ
),
4016 SR_RAS ("erxctlr_el1", CPENC (3,0,C5
,C4
,1), 0),
4017 SR_RAS ("erxstatus_el1", CPENC (3,0,C5
,C4
,2), 0),
4018 SR_RAS ("erxaddr_el1", CPENC (3,0,C5
,C4
,3), 0),
4019 SR_RAS ("erxmisc0_el1", CPENC (3,0,C5
,C5
,0), 0),
4020 SR_RAS ("erxmisc1_el1", CPENC (3,0,C5
,C5
,1), 0),
4021 SR_RAS ("erxmisc2_el1", CPENC (3,0,C5
,C5
,2), 0),
4022 SR_RAS ("erxmisc3_el1", CPENC (3,0,C5
,C5
,3), 0),
4023 SR_RAS ("erxpfgcdn_el1", CPENC (3,0,C5
,C4
,6), 0),
4024 SR_RAS ("erxpfgctl_el1", CPENC (3,0,C5
,C4
,5), 0),
4025 SR_RAS ("erxpfgf_el1", CPENC (3,0,C5
,C4
,4), F_REG_READ
),
4026 SR_CORE ("far_el1", CPENC (3,0,C6
,C0
,0), 0),
4027 SR_CORE ("far_el2", CPENC (3,4,C6
,C0
,0), 0),
4028 SR_CORE ("far_el3", CPENC (3,6,C6
,C0
,0), 0),
4029 SR_V8_1 ("far_el12", CPENC (3,5,C6
,C0
,0), 0),
4030 SR_CORE ("hpfar_el2", CPENC (3,4,C6
,C0
,4), 0),
4031 SR_CORE ("par_el1", CPENC (3,0,C7
,C4
,0), 0),
4032 SR_CORE ("mair_el1", CPENC (3,0,C10
,C2
,0), 0),
4033 SR_CORE ("mair_el2", CPENC (3,4,C10
,C2
,0), 0),
4034 SR_CORE ("mair_el3", CPENC (3,6,C10
,C2
,0), 0),
4035 SR_V8_1 ("mair_el12", CPENC (3,5,C10
,C2
,0), 0),
4036 SR_CORE ("amair_el1", CPENC (3,0,C10
,C3
,0), 0),
4037 SR_CORE ("amair_el2", CPENC (3,4,C10
,C3
,0), 0),
4038 SR_CORE ("amair_el3", CPENC (3,6,C10
,C3
,0), 0),
4039 SR_V8_1 ("amair_el12", CPENC (3,5,C10
,C3
,0), 0),
4040 SR_CORE ("vbar_el1", CPENC (3,0,C12
,C0
,0), 0),
4041 SR_CORE ("vbar_el2", CPENC (3,4,C12
,C0
,0), 0),
4042 SR_CORE ("vbar_el3", CPENC (3,6,C12
,C0
,0), 0),
4043 SR_V8_1 ("vbar_el12", CPENC (3,5,C12
,C0
,0), 0),
4044 SR_CORE ("rvbar_el1", CPENC (3,0,C12
,C0
,1), F_REG_READ
),
4045 SR_CORE ("rvbar_el2", CPENC (3,4,C12
,C0
,1), F_REG_READ
),
4046 SR_CORE ("rvbar_el3", CPENC (3,6,C12
,C0
,1), F_REG_READ
),
4047 SR_CORE ("rmr_el1", CPENC (3,0,C12
,C0
,2), 0),
4048 SR_CORE ("rmr_el2", CPENC (3,4,C12
,C0
,2), 0),
4049 SR_CORE ("rmr_el3", CPENC (3,6,C12
,C0
,2), 0),
4050 SR_CORE ("isr_el1", CPENC (3,0,C12
,C1
,0), F_REG_READ
),
4051 SR_RAS ("disr_el1", CPENC (3,0,C12
,C1
,1), 0),
4052 SR_RAS ("vdisr_el2", CPENC (3,4,C12
,C1
,1), 0),
4053 SR_CORE ("contextidr_el1", CPENC (3,0,C13
,C0
,1), 0),
4054 SR_V8_1 ("contextidr_el2", CPENC (3,4,C13
,C0
,1), 0),
4055 SR_V8_1 ("contextidr_el12", CPENC (3,5,C13
,C0
,1), 0),
4056 SR_RNG ("rndr", CPENC (3,3,C2
,C4
,0), F_REG_READ
),
4057 SR_RNG ("rndrrs", CPENC (3,3,C2
,C4
,1), F_REG_READ
),
4058 SR_MEMTAG ("tco", CPENC (3,3,C4
,C2
,7), 0),
4059 SR_MEMTAG ("tfsre0_el1", CPENC (3,0,C5
,C6
,1), 0),
4060 SR_MEMTAG ("tfsr_el1", CPENC (3,0,C5
,C6
,0), 0),
4061 SR_MEMTAG ("tfsr_el2", CPENC (3,4,C5
,C6
,0), 0),
4062 SR_MEMTAG ("tfsr_el3", CPENC (3,6,C5
,C6
,0), 0),
4063 SR_MEMTAG ("tfsr_el12", CPENC (3,5,C5
,C6
,0), 0),
4064 SR_MEMTAG ("rgsr_el1", CPENC (3,0,C1
,C0
,5), 0),
4065 SR_MEMTAG ("gcr_el1", CPENC (3,0,C1
,C0
,6), 0),
4066 SR_MEMTAG ("gmid_el1", CPENC (3,1,C0
,C0
,4), F_REG_READ
),
4067 SR_CORE ("tpidr_el0", CPENC (3,3,C13
,C0
,2), 0),
4068 SR_CORE ("tpidrro_el0", CPENC (3,3,C13
,C0
,3), 0),
4069 SR_CORE ("tpidr_el1", CPENC (3,0,C13
,C0
,4), 0),
4070 SR_CORE ("tpidr_el2", CPENC (3,4,C13
,C0
,2), 0),
4071 SR_CORE ("tpidr_el3", CPENC (3,6,C13
,C0
,2), 0),
4072 SR_SCXTNUM ("scxtnum_el0", CPENC (3,3,C13
,C0
,7), 0),
4073 SR_SCXTNUM ("scxtnum_el1", CPENC (3,0,C13
,C0
,7), 0),
4074 SR_SCXTNUM ("scxtnum_el2", CPENC (3,4,C13
,C0
,7), 0),
4075 SR_SCXTNUM ("scxtnum_el12", CPENC (3,5,C13
,C0
,7), 0),
4076 SR_SCXTNUM ("scxtnum_el3", CPENC (3,6,C13
,C0
,7), 0),
4077 SR_CORE ("teecr32_el1", CPENC (2,2,C0
, C0
,0), 0), /* See section 3.9.7.1. */
4078 SR_CORE ("cntfrq_el0", CPENC (3,3,C14
,C0
,0), 0),
4079 SR_CORE ("cntpct_el0", CPENC (3,3,C14
,C0
,1), F_REG_READ
),
4080 SR_CORE ("cntvct_el0", CPENC (3,3,C14
,C0
,2), F_REG_READ
),
4081 SR_CORE ("cntvoff_el2", CPENC (3,4,C14
,C0
,3), 0),
4082 SR_CORE ("cntkctl_el1", CPENC (3,0,C14
,C1
,0), 0),
4083 SR_V8_1 ("cntkctl_el12", CPENC (3,5,C14
,C1
,0), 0),
4084 SR_CORE ("cnthctl_el2", CPENC (3,4,C14
,C1
,0), 0),
4085 SR_CORE ("cntp_tval_el0", CPENC (3,3,C14
,C2
,0), 0),
4086 SR_V8_1 ("cntp_tval_el02", CPENC (3,5,C14
,C2
,0), 0),
4087 SR_CORE ("cntp_ctl_el0", CPENC (3,3,C14
,C2
,1), 0),
4088 SR_V8_1 ("cntp_ctl_el02", CPENC (3,5,C14
,C2
,1), 0),
4089 SR_CORE ("cntp_cval_el0", CPENC (3,3,C14
,C2
,2), 0),
4090 SR_V8_1 ("cntp_cval_el02", CPENC (3,5,C14
,C2
,2), 0),
4091 SR_CORE ("cntv_tval_el0", CPENC (3,3,C14
,C3
,0), 0),
4092 SR_V8_1 ("cntv_tval_el02", CPENC (3,5,C14
,C3
,0), 0),
4093 SR_CORE ("cntv_ctl_el0", CPENC (3,3,C14
,C3
,1), 0),
4094 SR_V8_1 ("cntv_ctl_el02", CPENC (3,5,C14
,C3
,1), 0),
4095 SR_CORE ("cntv_cval_el0", CPENC (3,3,C14
,C3
,2), 0),
4096 SR_V8_1 ("cntv_cval_el02", CPENC (3,5,C14
,C3
,2), 0),
4097 SR_CORE ("cnthp_tval_el2", CPENC (3,4,C14
,C2
,0), 0),
4098 SR_CORE ("cnthp_ctl_el2", CPENC (3,4,C14
,C2
,1), 0),
4099 SR_CORE ("cnthp_cval_el2", CPENC (3,4,C14
,C2
,2), 0),
4100 SR_CORE ("cntps_tval_el1", CPENC (3,7,C14
,C2
,0), 0),
4101 SR_CORE ("cntps_ctl_el1", CPENC (3,7,C14
,C2
,1), 0),
4102 SR_CORE ("cntps_cval_el1", CPENC (3,7,C14
,C2
,2), 0),
4103 SR_V8_1 ("cnthv_tval_el2", CPENC (3,4,C14
,C3
,0), 0),
4104 SR_V8_1 ("cnthv_ctl_el2", CPENC (3,4,C14
,C3
,1), 0),
4105 SR_V8_1 ("cnthv_cval_el2", CPENC (3,4,C14
,C3
,2), 0),
4106 SR_CORE ("dacr32_el2", CPENC (3,4,C3
,C0
,0), 0),
4107 SR_CORE ("ifsr32_el2", CPENC (3,4,C5
,C0
,1), 0),
4108 SR_CORE ("teehbr32_el1", CPENC (2,2,C1
,C0
,0), 0),
4109 SR_CORE ("sder32_el3", CPENC (3,6,C1
,C1
,1), 0),
4110 SR_CORE ("mdscr_el1", CPENC (2,0,C0
,C2
,2), 0),
4111 SR_CORE ("mdccsr_el0", CPENC (2,3,C0
,C1
,0), F_REG_READ
),
4112 SR_CORE ("mdccint_el1", CPENC (2,0,C0
,C2
,0), 0),
4113 SR_CORE ("dbgdtr_el0", CPENC (2,3,C0
,C4
,0), 0),
4114 SR_CORE ("dbgdtrrx_el0", CPENC (2,3,C0
,C5
,0), F_REG_READ
),
4115 SR_CORE ("dbgdtrtx_el0", CPENC (2,3,C0
,C5
,0), F_REG_WRITE
),
4116 SR_CORE ("osdtrrx_el1", CPENC (2,0,C0
,C0
,2), 0),
4117 SR_CORE ("osdtrtx_el1", CPENC (2,0,C0
,C3
,2), 0),
4118 SR_CORE ("oseccr_el1", CPENC (2,0,C0
,C6
,2), 0),
4119 SR_CORE ("dbgvcr32_el2", CPENC (2,4,C0
,C7
,0), 0),
4120 SR_CORE ("dbgbvr0_el1", CPENC (2,0,C0
,C0
,4), 0),
4121 SR_CORE ("dbgbvr1_el1", CPENC (2,0,C0
,C1
,4), 0),
4122 SR_CORE ("dbgbvr2_el1", CPENC (2,0,C0
,C2
,4), 0),
4123 SR_CORE ("dbgbvr3_el1", CPENC (2,0,C0
,C3
,4), 0),
4124 SR_CORE ("dbgbvr4_el1", CPENC (2,0,C0
,C4
,4), 0),
4125 SR_CORE ("dbgbvr5_el1", CPENC (2,0,C0
,C5
,4), 0),
4126 SR_CORE ("dbgbvr6_el1", CPENC (2,0,C0
,C6
,4), 0),
4127 SR_CORE ("dbgbvr7_el1", CPENC (2,0,C0
,C7
,4), 0),
4128 SR_CORE ("dbgbvr8_el1", CPENC (2,0,C0
,C8
,4), 0),
4129 SR_CORE ("dbgbvr9_el1", CPENC (2,0,C0
,C9
,4), 0),
4130 SR_CORE ("dbgbvr10_el1", CPENC (2,0,C0
,C10
,4), 0),
4131 SR_CORE ("dbgbvr11_el1", CPENC (2,0,C0
,C11
,4), 0),
4132 SR_CORE ("dbgbvr12_el1", CPENC (2,0,C0
,C12
,4), 0),
4133 SR_CORE ("dbgbvr13_el1", CPENC (2,0,C0
,C13
,4), 0),
4134 SR_CORE ("dbgbvr14_el1", CPENC (2,0,C0
,C14
,4), 0),
4135 SR_CORE ("dbgbvr15_el1", CPENC (2,0,C0
,C15
,4), 0),
4136 SR_CORE ("dbgbcr0_el1", CPENC (2,0,C0
,C0
,5), 0),
4137 SR_CORE ("dbgbcr1_el1", CPENC (2,0,C0
,C1
,5), 0),
4138 SR_CORE ("dbgbcr2_el1", CPENC (2,0,C0
,C2
,5), 0),
4139 SR_CORE ("dbgbcr3_el1", CPENC (2,0,C0
,C3
,5), 0),
4140 SR_CORE ("dbgbcr4_el1", CPENC (2,0,C0
,C4
,5), 0),
4141 SR_CORE ("dbgbcr5_el1", CPENC (2,0,C0
,C5
,5), 0),
4142 SR_CORE ("dbgbcr6_el1", CPENC (2,0,C0
,C6
,5), 0),
4143 SR_CORE ("dbgbcr7_el1", CPENC (2,0,C0
,C7
,5), 0),
4144 SR_CORE ("dbgbcr8_el1", CPENC (2,0,C0
,C8
,5), 0),
4145 SR_CORE ("dbgbcr9_el1", CPENC (2,0,C0
,C9
,5), 0),
4146 SR_CORE ("dbgbcr10_el1", CPENC (2,0,C0
,C10
,5), 0),
4147 SR_CORE ("dbgbcr11_el1", CPENC (2,0,C0
,C11
,5), 0),
4148 SR_CORE ("dbgbcr12_el1", CPENC (2,0,C0
,C12
,5), 0),
4149 SR_CORE ("dbgbcr13_el1", CPENC (2,0,C0
,C13
,5), 0),
4150 SR_CORE ("dbgbcr14_el1", CPENC (2,0,C0
,C14
,5), 0),
4151 SR_CORE ("dbgbcr15_el1", CPENC (2,0,C0
,C15
,5), 0),
4152 SR_CORE ("dbgwvr0_el1", CPENC (2,0,C0
,C0
,6), 0),
4153 SR_CORE ("dbgwvr1_el1", CPENC (2,0,C0
,C1
,6), 0),
4154 SR_CORE ("dbgwvr2_el1", CPENC (2,0,C0
,C2
,6), 0),
4155 SR_CORE ("dbgwvr3_el1", CPENC (2,0,C0
,C3
,6), 0),
4156 SR_CORE ("dbgwvr4_el1", CPENC (2,0,C0
,C4
,6), 0),
4157 SR_CORE ("dbgwvr5_el1", CPENC (2,0,C0
,C5
,6), 0),
4158 SR_CORE ("dbgwvr6_el1", CPENC (2,0,C0
,C6
,6), 0),
4159 SR_CORE ("dbgwvr7_el1", CPENC (2,0,C0
,C7
,6), 0),
4160 SR_CORE ("dbgwvr8_el1", CPENC (2,0,C0
,C8
,6), 0),
4161 SR_CORE ("dbgwvr9_el1", CPENC (2,0,C0
,C9
,6), 0),
4162 SR_CORE ("dbgwvr10_el1", CPENC (2,0,C0
,C10
,6), 0),
4163 SR_CORE ("dbgwvr11_el1", CPENC (2,0,C0
,C11
,6), 0),
4164 SR_CORE ("dbgwvr12_el1", CPENC (2,0,C0
,C12
,6), 0),
4165 SR_CORE ("dbgwvr13_el1", CPENC (2,0,C0
,C13
,6), 0),
4166 SR_CORE ("dbgwvr14_el1", CPENC (2,0,C0
,C14
,6), 0),
4167 SR_CORE ("dbgwvr15_el1", CPENC (2,0,C0
,C15
,6), 0),
4168 SR_CORE ("dbgwcr0_el1", CPENC (2,0,C0
,C0
,7), 0),
4169 SR_CORE ("dbgwcr1_el1", CPENC (2,0,C0
,C1
,7), 0),
4170 SR_CORE ("dbgwcr2_el1", CPENC (2,0,C0
,C2
,7), 0),
4171 SR_CORE ("dbgwcr3_el1", CPENC (2,0,C0
,C3
,7), 0),
4172 SR_CORE ("dbgwcr4_el1", CPENC (2,0,C0
,C4
,7), 0),
4173 SR_CORE ("dbgwcr5_el1", CPENC (2,0,C0
,C5
,7), 0),
4174 SR_CORE ("dbgwcr6_el1", CPENC (2,0,C0
,C6
,7), 0),
4175 SR_CORE ("dbgwcr7_el1", CPENC (2,0,C0
,C7
,7), 0),
4176 SR_CORE ("dbgwcr8_el1", CPENC (2,0,C0
,C8
,7), 0),
4177 SR_CORE ("dbgwcr9_el1", CPENC (2,0,C0
,C9
,7), 0),
4178 SR_CORE ("dbgwcr10_el1", CPENC (2,0,C0
,C10
,7), 0),
4179 SR_CORE ("dbgwcr11_el1", CPENC (2,0,C0
,C11
,7), 0),
4180 SR_CORE ("dbgwcr12_el1", CPENC (2,0,C0
,C12
,7), 0),
4181 SR_CORE ("dbgwcr13_el1", CPENC (2,0,C0
,C13
,7), 0),
4182 SR_CORE ("dbgwcr14_el1", CPENC (2,0,C0
,C14
,7), 0),
4183 SR_CORE ("dbgwcr15_el1", CPENC (2,0,C0
,C15
,7), 0),
4184 SR_CORE ("mdrar_el1", CPENC (2,0,C1
,C0
,0), F_REG_READ
),
4185 SR_CORE ("oslar_el1", CPENC (2,0,C1
,C0
,4), F_REG_WRITE
),
4186 SR_CORE ("oslsr_el1", CPENC (2,0,C1
,C1
,4), F_REG_READ
),
4187 SR_CORE ("osdlr_el1", CPENC (2,0,C1
,C3
,4), 0),
4188 SR_CORE ("dbgprcr_el1", CPENC (2,0,C1
,C4
,4), 0),
4189 SR_CORE ("dbgclaimset_el1", CPENC (2,0,C7
,C8
,6), 0),
4190 SR_CORE ("dbgclaimclr_el1", CPENC (2,0,C7
,C9
,6), 0),
4191 SR_CORE ("dbgauthstatus_el1", CPENC (2,0,C7
,C14
,6), F_REG_READ
),
4192 SR_PROFILE ("pmblimitr_el1", CPENC (3,0,C9
,C10
,0), 0),
4193 SR_PROFILE ("pmbptr_el1", CPENC (3,0,C9
,C10
,1), 0),
4194 SR_PROFILE ("pmbsr_el1", CPENC (3,0,C9
,C10
,3), 0),
4195 SR_PROFILE ("pmbidr_el1", CPENC (3,0,C9
,C10
,7), F_REG_READ
),
4196 SR_PROFILE ("pmscr_el1", CPENC (3,0,C9
,C9
,0), 0),
4197 SR_PROFILE ("pmsicr_el1", CPENC (3,0,C9
,C9
,2), 0),
4198 SR_PROFILE ("pmsirr_el1", CPENC (3,0,C9
,C9
,3), 0),
4199 SR_PROFILE ("pmsfcr_el1", CPENC (3,0,C9
,C9
,4), 0),
4200 SR_PROFILE ("pmsevfr_el1", CPENC (3,0,C9
,C9
,5), 0),
4201 SR_PROFILE ("pmslatfr_el1", CPENC (3,0,C9
,C9
,6), 0),
4202 SR_PROFILE ("pmsidr_el1", CPENC (3,0,C9
,C9
,7), 0),
4203 SR_PROFILE ("pmscr_el2", CPENC (3,4,C9
,C9
,0), 0),
4204 SR_PROFILE ("pmscr_el12", CPENC (3,5,C9
,C9
,0), 0),
4205 SR_CORE ("pmcr_el0", CPENC (3,3,C9
,C12
,0), 0),
4206 SR_CORE ("pmcntenset_el0", CPENC (3,3,C9
,C12
,1), 0),
4207 SR_CORE ("pmcntenclr_el0", CPENC (3,3,C9
,C12
,2), 0),
4208 SR_CORE ("pmovsclr_el0", CPENC (3,3,C9
,C12
,3), 0),
4209 SR_CORE ("pmswinc_el0", CPENC (3,3,C9
,C12
,4), F_REG_WRITE
),
4210 SR_CORE ("pmselr_el0", CPENC (3,3,C9
,C12
,5), 0),
4211 SR_CORE ("pmceid0_el0", CPENC (3,3,C9
,C12
,6), F_REG_READ
),
4212 SR_CORE ("pmceid1_el0", CPENC (3,3,C9
,C12
,7), F_REG_READ
),
4213 SR_CORE ("pmccntr_el0", CPENC (3,3,C9
,C13
,0), 0),
4214 SR_CORE ("pmxevtyper_el0", CPENC (3,3,C9
,C13
,1), 0),
4215 SR_CORE ("pmxevcntr_el0", CPENC (3,3,C9
,C13
,2), 0),
4216 SR_CORE ("pmuserenr_el0", CPENC (3,3,C9
,C14
,0), 0),
4217 SR_CORE ("pmintenset_el1", CPENC (3,0,C9
,C14
,1), 0),
4218 SR_CORE ("pmintenclr_el1", CPENC (3,0,C9
,C14
,2), 0),
4219 SR_CORE ("pmovsset_el0", CPENC (3,3,C9
,C14
,3), 0),
4220 SR_CORE ("pmevcntr0_el0", CPENC (3,3,C14
,C8
,0), 0),
4221 SR_CORE ("pmevcntr1_el0", CPENC (3,3,C14
,C8
,1), 0),
4222 SR_CORE ("pmevcntr2_el0", CPENC (3,3,C14
,C8
,2), 0),
4223 SR_CORE ("pmevcntr3_el0", CPENC (3,3,C14
,C8
,3), 0),
4224 SR_CORE ("pmevcntr4_el0", CPENC (3,3,C14
,C8
,4), 0),
4225 SR_CORE ("pmevcntr5_el0", CPENC (3,3,C14
,C8
,5), 0),
4226 SR_CORE ("pmevcntr6_el0", CPENC (3,3,C14
,C8
,6), 0),
4227 SR_CORE ("pmevcntr7_el0", CPENC (3,3,C14
,C8
,7), 0),
4228 SR_CORE ("pmevcntr8_el0", CPENC (3,3,C14
,C9
,0), 0),
4229 SR_CORE ("pmevcntr9_el0", CPENC (3,3,C14
,C9
,1), 0),
4230 SR_CORE ("pmevcntr10_el0", CPENC (3,3,C14
,C9
,2), 0),
4231 SR_CORE ("pmevcntr11_el0", CPENC (3,3,C14
,C9
,3), 0),
4232 SR_CORE ("pmevcntr12_el0", CPENC (3,3,C14
,C9
,4), 0),
4233 SR_CORE ("pmevcntr13_el0", CPENC (3,3,C14
,C9
,5), 0),
4234 SR_CORE ("pmevcntr14_el0", CPENC (3,3,C14
,C9
,6), 0),
4235 SR_CORE ("pmevcntr15_el0", CPENC (3,3,C14
,C9
,7), 0),
4236 SR_CORE ("pmevcntr16_el0", CPENC (3,3,C14
,C10
,0), 0),
4237 SR_CORE ("pmevcntr17_el0", CPENC (3,3,C14
,C10
,1), 0),
4238 SR_CORE ("pmevcntr18_el0", CPENC (3,3,C14
,C10
,2), 0),
4239 SR_CORE ("pmevcntr19_el0", CPENC (3,3,C14
,C10
,3), 0),
4240 SR_CORE ("pmevcntr20_el0", CPENC (3,3,C14
,C10
,4), 0),
4241 SR_CORE ("pmevcntr21_el0", CPENC (3,3,C14
,C10
,5), 0),
4242 SR_CORE ("pmevcntr22_el0", CPENC (3,3,C14
,C10
,6), 0),
4243 SR_CORE ("pmevcntr23_el0", CPENC (3,3,C14
,C10
,7), 0),
4244 SR_CORE ("pmevcntr24_el0", CPENC (3,3,C14
,C11
,0), 0),
4245 SR_CORE ("pmevcntr25_el0", CPENC (3,3,C14
,C11
,1), 0),
4246 SR_CORE ("pmevcntr26_el0", CPENC (3,3,C14
,C11
,2), 0),
4247 SR_CORE ("pmevcntr27_el0", CPENC (3,3,C14
,C11
,3), 0),
4248 SR_CORE ("pmevcntr28_el0", CPENC (3,3,C14
,C11
,4), 0),
4249 SR_CORE ("pmevcntr29_el0", CPENC (3,3,C14
,C11
,5), 0),
4250 SR_CORE ("pmevcntr30_el0", CPENC (3,3,C14
,C11
,6), 0),
4251 SR_CORE ("pmevtyper0_el0", CPENC (3,3,C14
,C12
,0), 0),
4252 SR_CORE ("pmevtyper1_el0", CPENC (3,3,C14
,C12
,1), 0),
4253 SR_CORE ("pmevtyper2_el0", CPENC (3,3,C14
,C12
,2), 0),
4254 SR_CORE ("pmevtyper3_el0", CPENC (3,3,C14
,C12
,3), 0),
4255 SR_CORE ("pmevtyper4_el0", CPENC (3,3,C14
,C12
,4), 0),
4256 SR_CORE ("pmevtyper5_el0", CPENC (3,3,C14
,C12
,5), 0),
4257 SR_CORE ("pmevtyper6_el0", CPENC (3,3,C14
,C12
,6), 0),
4258 SR_CORE ("pmevtyper7_el0", CPENC (3,3,C14
,C12
,7), 0),
4259 SR_CORE ("pmevtyper8_el0", CPENC (3,3,C14
,C13
,0), 0),
4260 SR_CORE ("pmevtyper9_el0", CPENC (3,3,C14
,C13
,1), 0),
4261 SR_CORE ("pmevtyper10_el0", CPENC (3,3,C14
,C13
,2), 0),
4262 SR_CORE ("pmevtyper11_el0", CPENC (3,3,C14
,C13
,3), 0),
4263 SR_CORE ("pmevtyper12_el0", CPENC (3,3,C14
,C13
,4), 0),
4264 SR_CORE ("pmevtyper13_el0", CPENC (3,3,C14
,C13
,5), 0),
4265 SR_CORE ("pmevtyper14_el0", CPENC (3,3,C14
,C13
,6), 0),
4266 SR_CORE ("pmevtyper15_el0", CPENC (3,3,C14
,C13
,7), 0),
4267 SR_CORE ("pmevtyper16_el0", CPENC (3,3,C14
,C14
,0), 0),
4268 SR_CORE ("pmevtyper17_el0", CPENC (3,3,C14
,C14
,1), 0),
4269 SR_CORE ("pmevtyper18_el0", CPENC (3,3,C14
,C14
,2), 0),
4270 SR_CORE ("pmevtyper19_el0", CPENC (3,3,C14
,C14
,3), 0),
4271 SR_CORE ("pmevtyper20_el0", CPENC (3,3,C14
,C14
,4), 0),
4272 SR_CORE ("pmevtyper21_el0", CPENC (3,3,C14
,C14
,5), 0),
4273 SR_CORE ("pmevtyper22_el0", CPENC (3,3,C14
,C14
,6), 0),
4274 SR_CORE ("pmevtyper23_el0", CPENC (3,3,C14
,C14
,7), 0),
4275 SR_CORE ("pmevtyper24_el0", CPENC (3,3,C14
,C15
,0), 0),
4276 SR_CORE ("pmevtyper25_el0", CPENC (3,3,C14
,C15
,1), 0),
4277 SR_CORE ("pmevtyper26_el0", CPENC (3,3,C14
,C15
,2), 0),
4278 SR_CORE ("pmevtyper27_el0", CPENC (3,3,C14
,C15
,3), 0),
4279 SR_CORE ("pmevtyper28_el0", CPENC (3,3,C14
,C15
,4), 0),
4280 SR_CORE ("pmevtyper29_el0", CPENC (3,3,C14
,C15
,5), 0),
4281 SR_CORE ("pmevtyper30_el0", CPENC (3,3,C14
,C15
,6), 0),
4282 SR_CORE ("pmccfiltr_el0", CPENC (3,3,C14
,C15
,7), 0),
  SR_V8_4 ("dit", CPEN_ (3,C2,5), 0),
  SR_V8_4 ("vstcr_el2", CPENC (3,4,C2,C6,2), 0),
  SR_V8_4_A ("vsttbr_el2", CPENC (3,4,C2,C6,0), 0),
  SR_V8_4 ("cnthvs_tval_el2", CPENC (3,4,C14,C4,0), 0),
  SR_V8_4 ("cnthvs_cval_el2", CPENC (3,4,C14,C4,2), 0),
  SR_V8_4 ("cnthvs_ctl_el2", CPENC (3,4,C14,C4,1), 0),
  SR_V8_4 ("cnthps_tval_el2", CPENC (3,4,C14,C5,0), 0),
  SR_V8_4 ("cnthps_cval_el2", CPENC (3,4,C14,C5,2), 0),
  SR_V8_4 ("cnthps_ctl_el2", CPENC (3,4,C14,C5,1), 0),
  SR_V8_4 ("sder32_el2", CPENC (3,4,C1,C3,1), 0),
  SR_V8_4 ("vncr_el2", CPENC (3,4,C2,C2,0), 0),
4296 SR_CORE ("mpam0_el1", CPENC (3,0,C10
,C5
,1), 0),
4297 SR_CORE ("mpam1_el1", CPENC (3,0,C10
,C5
,0), 0),
4298 SR_CORE ("mpam1_el12", CPENC (3,5,C10
,C5
,0), 0),
4299 SR_CORE ("mpam2_el2", CPENC (3,4,C10
,C5
,0), 0),
4300 SR_CORE ("mpam3_el3", CPENC (3,6,C10
,C5
,0), 0),
4301 SR_CORE ("mpamhcr_el2", CPENC (3,4,C10
,C4
,0), 0),
4302 SR_CORE ("mpamidr_el1", CPENC (3,0,C10
,C4
,4), F_REG_READ
),
4303 SR_CORE ("mpamvpm0_el2", CPENC (3,4,C10
,C6
,0), 0),
4304 SR_CORE ("mpamvpm1_el2", CPENC (3,4,C10
,C6
,1), 0),
4305 SR_CORE ("mpamvpm2_el2", CPENC (3,4,C10
,C6
,2), 0),
4306 SR_CORE ("mpamvpm3_el2", CPENC (3,4,C10
,C6
,3), 0),
4307 SR_CORE ("mpamvpm4_el2", CPENC (3,4,C10
,C6
,4), 0),
4308 SR_CORE ("mpamvpm5_el2", CPENC (3,4,C10
,C6
,5), 0),
4309 SR_CORE ("mpamvpm6_el2", CPENC (3,4,C10
,C6
,6), 0),
4310 SR_CORE ("mpamvpm7_el2", CPENC (3,4,C10
,C6
,7), 0),
4311 SR_CORE ("mpamvpmv_el2", CPENC (3,4,C10
,C4
,1), 0),
  SR_V8_R ("mpuir_el1", CPENC (3,0,C0,C0,4), F_REG_READ),
  SR_V8_R ("mpuir_el2", CPENC (3,4,C0,C0,4), F_REG_READ),
  SR_V8_R ("prbar_el1", CPENC (3,0,C6,C8,0), 0),
  SR_V8_R ("prbar_el2", CPENC (3,4,C6,C8,0), 0),

#define ENC_BARLAR(x,n,lar) \
  CPENC (3, (x-1) << 2, C6, 8 | (n >> 1), ((n & 1) << 2) | lar)

#define PRBARn_ELx(x,n) SR_V8_R ("prbar" #n "_el" #x, ENC_BARLAR (x,n,0), 0)
#define PRLARn_ELx(x,n) SR_V8_R ("prlar" #n "_el" #x, ENC_BARLAR (x,n,1), 0)
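/* Worked expansion of the helper macros above (illustrative only, using the
   PRBARn_ELx and ENC_BARLAR definitions just given):
     PRBARn_ELx (1, 3)
       => SR_V8_R ("prbar3_el1", ENC_BARLAR (1, 3, 0), 0)
     ENC_BARLAR (1, 3, 0)
       => CPENC (3, (1-1) << 2, C6, 8 | (3 >> 1), ((3 & 1) << 2) | 0)
       == CPENC (3, 0, C6, 9, 4)
   The SR_EXPAND_EL12 invocations below then stamp out the numbered
   _el1/_el2 variants from these per-register macros.  */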
  SR_EXPAND_EL12 (PRBARn_ELx)
  SR_V8_R ("prenr_el1", CPENC (3,0,C6,C1,1), 0),
  SR_V8_R ("prenr_el2", CPENC (3,4,C6,C1,1), 0),
  SR_V8_R ("prlar_el1", CPENC (3,0,C6,C8,1), 0),
  SR_V8_R ("prlar_el2", CPENC (3,4,C6,C8,1), 0),
  SR_EXPAND_EL12 (PRLARn_ELx)
  SR_V8_R ("prselr_el1", CPENC (3,0,C6,C2,1), 0),
  SR_V8_R ("prselr_el2", CPENC (3,4,C6,C2,1), 0),
  SR_V8_R ("vsctlr_el2", CPENC (3,4,C2,C0,0), 0),
4334 SR_CORE("trbbaser_el1", CPENC (3,0,C9
,C11
,2), 0),
4335 SR_CORE("trbidr_el1", CPENC (3,0,C9
,C11
,7), F_REG_READ
),
4336 SR_CORE("trblimitr_el1", CPENC (3,0,C9
,C11
,0), 0),
4337 SR_CORE("trbmar_el1", CPENC (3,0,C9
,C11
,4), 0),
4338 SR_CORE("trbptr_el1", CPENC (3,0,C9
,C11
,1), 0),
4339 SR_CORE("trbsr_el1", CPENC (3,0,C9
,C11
,3), 0),
4340 SR_CORE("trbtrg_el1", CPENC (3,0,C9
,C11
,6), 0),
4342 SR_CORE ("trcextinselr0", CPENC (2,1,C0
,C8
,4), 0),
4343 SR_CORE ("trcextinselr1", CPENC (2,1,C0
,C9
,4), 0),
4344 SR_CORE ("trcextinselr2", CPENC (2,1,C0
,C10
,4), 0),
4345 SR_CORE ("trcextinselr3", CPENC (2,1,C0
,C11
,4), 0),
4346 SR_CORE ("trcrsr", CPENC (2,1,C0
,C10
,0), 0),
4348 SR_CORE ("trcauthstatus", CPENC (2,1,C7
,C14
,6), F_REG_READ
),
4349 SR_CORE ("trccidr0", CPENC (2,1,C7
,C12
,7), F_REG_READ
),
4350 SR_CORE ("trccidr1", CPENC (2,1,C7
,C13
,7), F_REG_READ
),
4351 SR_CORE ("trccidr2", CPENC (2,1,C7
,C14
,7), F_REG_READ
),
4352 SR_CORE ("trccidr3", CPENC (2,1,C7
,C15
,7), F_REG_READ
),
4353 SR_CORE ("trcdevaff0", CPENC (2,1,C7
,C10
,6), F_REG_READ
),
4354 SR_CORE ("trcdevaff1", CPENC (2,1,C7
,C11
,6), F_REG_READ
),
4355 SR_CORE ("trcdevarch", CPENC (2,1,C7
,C15
,6), F_REG_READ
),
4356 SR_CORE ("trcdevid", CPENC (2,1,C7
,C2
,7), F_REG_READ
),
4357 SR_CORE ("trcdevtype", CPENC (2,1,C7
,C3
,7), F_REG_READ
),
4358 SR_CORE ("trcidr0", CPENC (2,1,C0
,C8
,7), F_REG_READ
),
4359 SR_CORE ("trcidr1", CPENC (2,1,C0
,C9
,7), F_REG_READ
),
4360 SR_CORE ("trcidr2", CPENC (2,1,C0
,C10
,7), F_REG_READ
),
4361 SR_CORE ("trcidr3", CPENC (2,1,C0
,C11
,7), F_REG_READ
),
4362 SR_CORE ("trcidr4", CPENC (2,1,C0
,C12
,7), F_REG_READ
),
4363 SR_CORE ("trcidr5", CPENC (2,1,C0
,C13
,7), F_REG_READ
),
4364 SR_CORE ("trcidr6", CPENC (2,1,C0
,C14
,7), F_REG_READ
),
4365 SR_CORE ("trcidr7", CPENC (2,1,C0
,C15
,7), F_REG_READ
),
4366 SR_CORE ("trcidr8", CPENC (2,1,C0
,C0
,6), F_REG_READ
),
4367 SR_CORE ("trcidr9", CPENC (2,1,C0
,C1
,6), F_REG_READ
),
4368 SR_CORE ("trcidr10", CPENC (2,1,C0
,C2
,6), F_REG_READ
),
4369 SR_CORE ("trcidr11", CPENC (2,1,C0
,C3
,6), F_REG_READ
),
4370 SR_CORE ("trcidr12", CPENC (2,1,C0
,C4
,6), F_REG_READ
),
4371 SR_CORE ("trcidr13", CPENC (2,1,C0
,C5
,6), F_REG_READ
),
4372 SR_CORE ("trclsr", CPENC (2,1,C7
,C13
,6), F_REG_READ
),
4373 SR_CORE ("trcoslsr", CPENC (2,1,C1
,C1
,4), F_REG_READ
),
4374 SR_CORE ("trcpdsr", CPENC (2,1,C1
,C5
,4), F_REG_READ
),
4375 SR_CORE ("trcpidr0", CPENC (2,1,C7
,C8
,7), F_REG_READ
),
4376 SR_CORE ("trcpidr1", CPENC (2,1,C7
,C9
,7), F_REG_READ
),
4377 SR_CORE ("trcpidr2", CPENC (2,1,C7
,C10
,7), F_REG_READ
),
4378 SR_CORE ("trcpidr3", CPENC (2,1,C7
,C11
,7), F_REG_READ
),
4379 SR_CORE ("trcpidr4", CPENC (2,1,C7
,C4
,7), F_REG_READ
),
4380 SR_CORE ("trcpidr5", CPENC (2,1,C7
,C5
,7), F_REG_READ
),
4381 SR_CORE ("trcpidr6", CPENC (2,1,C7
,C6
,7), F_REG_READ
),
4382 SR_CORE ("trcpidr7", CPENC (2,1,C7
,C7
,7), F_REG_READ
),
4383 SR_CORE ("trcstatr", CPENC (2,1,C0
,C3
,0), F_REG_READ
),
4384 SR_CORE ("trcacatr0", CPENC (2,1,C2
,C0
,2), 0),
4385 SR_CORE ("trcacatr1", CPENC (2,1,C2
,C2
,2), 0),
4386 SR_CORE ("trcacatr2", CPENC (2,1,C2
,C4
,2), 0),
4387 SR_CORE ("trcacatr3", CPENC (2,1,C2
,C6
,2), 0),
4388 SR_CORE ("trcacatr4", CPENC (2,1,C2
,C8
,2), 0),
4389 SR_CORE ("trcacatr5", CPENC (2,1,C2
,C10
,2), 0),
4390 SR_CORE ("trcacatr6", CPENC (2,1,C2
,C12
,2), 0),
4391 SR_CORE ("trcacatr7", CPENC (2,1,C2
,C14
,2), 0),
4392 SR_CORE ("trcacatr8", CPENC (2,1,C2
,C0
,3), 0),
4393 SR_CORE ("trcacatr9", CPENC (2,1,C2
,C2
,3), 0),
4394 SR_CORE ("trcacatr10", CPENC (2,1,C2
,C4
,3), 0),
4395 SR_CORE ("trcacatr11", CPENC (2,1,C2
,C6
,3), 0),
4396 SR_CORE ("trcacatr12", CPENC (2,1,C2
,C8
,3), 0),
4397 SR_CORE ("trcacatr13", CPENC (2,1,C2
,C10
,3), 0),
4398 SR_CORE ("trcacatr14", CPENC (2,1,C2
,C12
,3), 0),
4399 SR_CORE ("trcacatr15", CPENC (2,1,C2
,C14
,3), 0),
4400 SR_CORE ("trcacvr0", CPENC (2,1,C2
,C0
,0), 0),
4401 SR_CORE ("trcacvr1", CPENC (2,1,C2
,C2
,0), 0),
4402 SR_CORE ("trcacvr2", CPENC (2,1,C2
,C4
,0), 0),
4403 SR_CORE ("trcacvr3", CPENC (2,1,C2
,C6
,0), 0),
4404 SR_CORE ("trcacvr4", CPENC (2,1,C2
,C8
,0), 0),
4405 SR_CORE ("trcacvr5", CPENC (2,1,C2
,C10
,0), 0),
4406 SR_CORE ("trcacvr6", CPENC (2,1,C2
,C12
,0), 0),
4407 SR_CORE ("trcacvr7", CPENC (2,1,C2
,C14
,0), 0),
4408 SR_CORE ("trcacvr8", CPENC (2,1,C2
,C0
,1), 0),
4409 SR_CORE ("trcacvr9", CPENC (2,1,C2
,C2
,1), 0),
4410 SR_CORE ("trcacvr10", CPENC (2,1,C2
,C4
,1), 0),
4411 SR_CORE ("trcacvr11", CPENC (2,1,C2
,C6
,1), 0),
4412 SR_CORE ("trcacvr12", CPENC (2,1,C2
,C8
,1), 0),
4413 SR_CORE ("trcacvr13", CPENC (2,1,C2
,C10
,1), 0),
4414 SR_CORE ("trcacvr14", CPENC (2,1,C2
,C12
,1), 0),
4415 SR_CORE ("trcacvr15", CPENC (2,1,C2
,C14
,1), 0),
4416 SR_CORE ("trcauxctlr", CPENC (2,1,C0
,C6
,0), 0),
4417 SR_CORE ("trcbbctlr", CPENC (2,1,C0
,C15
,0), 0),
4418 SR_CORE ("trcccctlr", CPENC (2,1,C0
,C14
,0), 0),
4419 SR_CORE ("trccidcctlr0", CPENC (2,1,C3
,C0
,2), 0),
4420 SR_CORE ("trccidcctlr1", CPENC (2,1,C3
,C1
,2), 0),
4421 SR_CORE ("trccidcvr0", CPENC (2,1,C3
,C0
,0), 0),
4422 SR_CORE ("trccidcvr1", CPENC (2,1,C3
,C2
,0), 0),
4423 SR_CORE ("trccidcvr2", CPENC (2,1,C3
,C4
,0), 0),
4424 SR_CORE ("trccidcvr3", CPENC (2,1,C3
,C6
,0), 0),
4425 SR_CORE ("trccidcvr4", CPENC (2,1,C3
,C8
,0), 0),
4426 SR_CORE ("trccidcvr5", CPENC (2,1,C3
,C10
,0), 0),
4427 SR_CORE ("trccidcvr6", CPENC (2,1,C3
,C12
,0), 0),
4428 SR_CORE ("trccidcvr7", CPENC (2,1,C3
,C14
,0), 0),
4429 SR_CORE ("trcclaimclr", CPENC (2,1,C7
,C9
,6), 0),
4430 SR_CORE ("trcclaimset", CPENC (2,1,C7
,C8
,6), 0),
4431 SR_CORE ("trccntctlr0", CPENC (2,1,C0
,C4
,5), 0),
4432 SR_CORE ("trccntctlr1", CPENC (2,1,C0
,C5
,5), 0),
4433 SR_CORE ("trccntctlr2", CPENC (2,1,C0
,C6
,5), 0),
4434 SR_CORE ("trccntctlr3", CPENC (2,1,C0
,C7
,5), 0),
4435 SR_CORE ("trccntrldvr0", CPENC (2,1,C0
,C0
,5), 0),
4436 SR_CORE ("trccntrldvr1", CPENC (2,1,C0
,C1
,5), 0),
4437 SR_CORE ("trccntrldvr2", CPENC (2,1,C0
,C2
,5), 0),
4438 SR_CORE ("trccntrldvr3", CPENC (2,1,C0
,C3
,5), 0),
4439 SR_CORE ("trccntvr0", CPENC (2,1,C0
,C8
,5), 0),
4440 SR_CORE ("trccntvr1", CPENC (2,1,C0
,C9
,5), 0),
4441 SR_CORE ("trccntvr2", CPENC (2,1,C0
,C10
,5), 0),
4442 SR_CORE ("trccntvr3", CPENC (2,1,C0
,C11
,5), 0),
4443 SR_CORE ("trcconfigr", CPENC (2,1,C0
,C4
,0), 0),
4444 SR_CORE ("trcdvcmr0", CPENC (2,1,C2
,C0
,6), 0),
4445 SR_CORE ("trcdvcmr1", CPENC (2,1,C2
,C4
,6), 0),
4446 SR_CORE ("trcdvcmr2", CPENC (2,1,C2
,C8
,6), 0),
4447 SR_CORE ("trcdvcmr3", CPENC (2,1,C2
,C12
,6), 0),
4448 SR_CORE ("trcdvcmr4", CPENC (2,1,C2
,C0
,7), 0),
4449 SR_CORE ("trcdvcmr5", CPENC (2,1,C2
,C4
,7), 0),
4450 SR_CORE ("trcdvcmr6", CPENC (2,1,C2
,C8
,7), 0),
4451 SR_CORE ("trcdvcmr7", CPENC (2,1,C2
,C12
,7), 0),
4452 SR_CORE ("trcdvcvr0", CPENC (2,1,C2
,C0
,4), 0),
4453 SR_CORE ("trcdvcvr1", CPENC (2,1,C2
,C4
,4), 0),
4454 SR_CORE ("trcdvcvr2", CPENC (2,1,C2
,C8
,4), 0),
4455 SR_CORE ("trcdvcvr3", CPENC (2,1,C2
,C12
,4), 0),
4456 SR_CORE ("trcdvcvr4", CPENC (2,1,C2
,C0
,5), 0),
4457 SR_CORE ("trcdvcvr5", CPENC (2,1,C2
,C4
,5), 0),
4458 SR_CORE ("trcdvcvr6", CPENC (2,1,C2
,C8
,5), 0),
4459 SR_CORE ("trcdvcvr7", CPENC (2,1,C2
,C12
,5), 0),
4460 SR_CORE ("trceventctl0r", CPENC (2,1,C0
,C8
,0), 0),
4461 SR_CORE ("trceventctl1r", CPENC (2,1,C0
,C9
,0), 0),
4462 SR_CORE ("trcextinselr0", CPENC (2,1,C0
,C8
,4), 0),
4463 SR_CORE ("trcextinselr", CPENC (2,1,C0
,C8
,4), 0),
4464 SR_CORE ("trcextinselr1", CPENC (2,1,C0
,C9
,4), 0),
4465 SR_CORE ("trcextinselr2", CPENC (2,1,C0
,C10
,4), 0),
4466 SR_CORE ("trcextinselr3", CPENC (2,1,C0
,C11
,4), 0),
4467 SR_CORE ("trcimspec0", CPENC (2,1,C0
,C0
,7), 0),
4468 SR_CORE ("trcimspec0", CPENC (2,1,C0
,C0
,7), 0),
4469 SR_CORE ("trcimspec1", CPENC (2,1,C0
,C1
,7), 0),
4470 SR_CORE ("trcimspec2", CPENC (2,1,C0
,C2
,7), 0),
4471 SR_CORE ("trcimspec3", CPENC (2,1,C0
,C3
,7), 0),
4472 SR_CORE ("trcimspec4", CPENC (2,1,C0
,C4
,7), 0),
4473 SR_CORE ("trcimspec5", CPENC (2,1,C0
,C5
,7), 0),
4474 SR_CORE ("trcimspec6", CPENC (2,1,C0
,C6
,7), 0),
4475 SR_CORE ("trcimspec7", CPENC (2,1,C0
,C7
,7), 0),
4476 SR_CORE ("trcitctrl", CPENC (2,1,C7
,C0
,4), 0),
4477 SR_CORE ("trcpdcr", CPENC (2,1,C1
,C4
,4), 0),
4478 SR_CORE ("trcprgctlr", CPENC (2,1,C0
,C1
,0), 0),
4479 SR_CORE ("trcprocselr", CPENC (2,1,C0
,C2
,0), 0),
4480 SR_CORE ("trcqctlr", CPENC (2,1,C0
,C1
,1), 0),
4481 SR_CORE ("trcrsctlr2", CPENC (2,1,C1
,C2
,0), 0),
4482 SR_CORE ("trcrsctlr3", CPENC (2,1,C1
,C3
,0), 0),
4483 SR_CORE ("trcrsctlr4", CPENC (2,1,C1
,C4
,0), 0),
4484 SR_CORE ("trcrsctlr5", CPENC (2,1,C1
,C5
,0), 0),
4485 SR_CORE ("trcrsctlr6", CPENC (2,1,C1
,C6
,0), 0),
4486 SR_CORE ("trcrsctlr7", CPENC (2,1,C1
,C7
,0), 0),
4487 SR_CORE ("trcrsctlr8", CPENC (2,1,C1
,C8
,0), 0),
4488 SR_CORE ("trcrsctlr9", CPENC (2,1,C1
,C9
,0), 0),
4489 SR_CORE ("trcrsctlr10", CPENC (2,1,C1
,C10
,0), 0),
4490 SR_CORE ("trcrsctlr11", CPENC (2,1,C1
,C11
,0), 0),
4491 SR_CORE ("trcrsctlr12", CPENC (2,1,C1
,C12
,0), 0),
4492 SR_CORE ("trcrsctlr13", CPENC (2,1,C1
,C13
,0), 0),
4493 SR_CORE ("trcrsctlr14", CPENC (2,1,C1
,C14
,0), 0),
4494 SR_CORE ("trcrsctlr15", CPENC (2,1,C1
,C15
,0), 0),
4495 SR_CORE ("trcrsctlr16", CPENC (2,1,C1
,C0
,1), 0),
4496 SR_CORE ("trcrsctlr17", CPENC (2,1,C1
,C1
,1), 0),
4497 SR_CORE ("trcrsctlr18", CPENC (2,1,C1
,C2
,1), 0),
4498 SR_CORE ("trcrsctlr19", CPENC (2,1,C1
,C3
,1), 0),
4499 SR_CORE ("trcrsctlr20", CPENC (2,1,C1
,C4
,1), 0),
4500 SR_CORE ("trcrsctlr21", CPENC (2,1,C1
,C5
,1), 0),
4501 SR_CORE ("trcrsctlr22", CPENC (2,1,C1
,C6
,1), 0),
4502 SR_CORE ("trcrsctlr23", CPENC (2,1,C1
,C7
,1), 0),
4503 SR_CORE ("trcrsctlr24", CPENC (2,1,C1
,C8
,1), 0),
4504 SR_CORE ("trcrsctlr25", CPENC (2,1,C1
,C9
,1), 0),
4505 SR_CORE ("trcrsctlr26", CPENC (2,1,C1
,C10
,1), 0),
4506 SR_CORE ("trcrsctlr27", CPENC (2,1,C1
,C11
,1), 0),
4507 SR_CORE ("trcrsctlr28", CPENC (2,1,C1
,C12
,1), 0),
4508 SR_CORE ("trcrsctlr29", CPENC (2,1,C1
,C13
,1), 0),
4509 SR_CORE ("trcrsctlr30", CPENC (2,1,C1
,C14
,1), 0),
4510 SR_CORE ("trcrsctlr31", CPENC (2,1,C1
,C15
,1), 0),
4511 SR_CORE ("trcseqevr0", CPENC (2,1,C0
,C0
,4), 0),
4512 SR_CORE ("trcseqevr1", CPENC (2,1,C0
,C1
,4), 0),
4513 SR_CORE ("trcseqevr2", CPENC (2,1,C0
,C2
,4), 0),
4514 SR_CORE ("trcseqrstevr", CPENC (2,1,C0
,C6
,4), 0),
4515 SR_CORE ("trcseqstr", CPENC (2,1,C0
,C7
,4), 0),
4516 SR_CORE ("trcssccr0", CPENC (2,1,C1
,C0
,2), 0),
4517 SR_CORE ("trcssccr1", CPENC (2,1,C1
,C1
,2), 0),
4518 SR_CORE ("trcssccr2", CPENC (2,1,C1
,C2
,2), 0),
4519 SR_CORE ("trcssccr3", CPENC (2,1,C1
,C3
,2), 0),
4520 SR_CORE ("trcssccr4", CPENC (2,1,C1
,C4
,2), 0),
4521 SR_CORE ("trcssccr5", CPENC (2,1,C1
,C5
,2), 0),
4522 SR_CORE ("trcssccr6", CPENC (2,1,C1
,C6
,2), 0),
4523 SR_CORE ("trcssccr7", CPENC (2,1,C1
,C7
,2), 0),
4524 SR_CORE ("trcsscsr0", CPENC (2,1,C1
,C8
,2), 0),
4525 SR_CORE ("trcsscsr1", CPENC (2,1,C1
,C9
,2), 0),
4526 SR_CORE ("trcsscsr2", CPENC (2,1,C1
,C10
,2), 0),
4527 SR_CORE ("trcsscsr3", CPENC (2,1,C1
,C11
,2), 0),
4528 SR_CORE ("trcsscsr4", CPENC (2,1,C1
,C12
,2), 0),
4529 SR_CORE ("trcsscsr5", CPENC (2,1,C1
,C13
,2), 0),
4530 SR_CORE ("trcsscsr6", CPENC (2,1,C1
,C14
,2), 0),
4531 SR_CORE ("trcsscsr7", CPENC (2,1,C1
,C15
,2), 0),
4532 SR_CORE ("trcsspcicr0", CPENC (2,1,C1
,C0
,3), 0),
4533 SR_CORE ("trcsspcicr1", CPENC (2,1,C1
,C1
,3), 0),
4534 SR_CORE ("trcsspcicr2", CPENC (2,1,C1
,C2
,3), 0),
4535 SR_CORE ("trcsspcicr3", CPENC (2,1,C1
,C3
,3), 0),
4536 SR_CORE ("trcsspcicr4", CPENC (2,1,C1
,C4
,3), 0),
4537 SR_CORE ("trcsspcicr5", CPENC (2,1,C1
,C5
,3), 0),
4538 SR_CORE ("trcsspcicr6", CPENC (2,1,C1
,C6
,3), 0),
4539 SR_CORE ("trcsspcicr7", CPENC (2,1,C1
,C7
,3), 0),
4540 SR_CORE ("trcstallctlr", CPENC (2,1,C0
,C11
,0), 0),
4541 SR_CORE ("trcsyncpr", CPENC (2,1,C0
,C13
,0), 0),
4542 SR_CORE ("trctraceidr", CPENC (2,1,C0
,C0
,1), 0),
4543 SR_CORE ("trctsctlr", CPENC (2,1,C0
,C12
,0), 0),
4544 SR_CORE ("trcvdarcctlr", CPENC (2,1,C0
,C10
,2), 0),
4545 SR_CORE ("trcvdctlr", CPENC (2,1,C0
,C8
,2), 0),
4546 SR_CORE ("trcvdsacctlr", CPENC (2,1,C0
,C9
,2), 0),
4547 SR_CORE ("trcvictlr", CPENC (2,1,C0
,C0
,2), 0),
4548 SR_CORE ("trcviiectlr", CPENC (2,1,C0
,C1
,2), 0),
4549 SR_CORE ("trcvipcssctlr", CPENC (2,1,C0
,C3
,2), 0),
4550 SR_CORE ("trcvissctlr", CPENC (2,1,C0
,C2
,2), 0),
4551 SR_CORE ("trcvmidcctlr0", CPENC (2,1,C3
,C2
,2), 0),
4552 SR_CORE ("trcvmidcctlr1", CPENC (2,1,C3
,C3
,2), 0),
4553 SR_CORE ("trcvmidcvr0", CPENC (2,1,C3
,C0
,1), 0),
4554 SR_CORE ("trcvmidcvr1", CPENC (2,1,C3
,C2
,1), 0),
4555 SR_CORE ("trcvmidcvr2", CPENC (2,1,C3
,C4
,1), 0),
4556 SR_CORE ("trcvmidcvr3", CPENC (2,1,C3
,C6
,1), 0),
4557 SR_CORE ("trcvmidcvr4", CPENC (2,1,C3
,C8
,1), 0),
4558 SR_CORE ("trcvmidcvr5", CPENC (2,1,C3
,C10
,1), 0),
4559 SR_CORE ("trcvmidcvr6", CPENC (2,1,C3
,C12
,1), 0),
4560 SR_CORE ("trcvmidcvr7", CPENC (2,1,C3
,C14
,1), 0),
4561 SR_CORE ("trclar", CPENC (2,1,C7
,C12
,6), F_REG_WRITE
),
4562 SR_CORE ("trcoslar", CPENC (2,1,C1
,C0
,4), F_REG_WRITE
),
4564 SR_CORE ("csrcr_el0", CPENC (2,3,C8
,C0
,0), 0),
4565 SR_CORE ("csrptr_el0", CPENC (2,3,C8
,C0
,1), 0),
4566 SR_CORE ("csridr_el0", CPENC (2,3,C8
,C0
,2), F_REG_READ
),
4567 SR_CORE ("csrptridx_el0", CPENC (2,3,C8
,C0
,3), F_REG_READ
),
4568 SR_CORE ("csrcr_el1", CPENC (2,0,C8
,C0
,0), 0),
4569 SR_CORE ("csrcr_el12", CPENC (2,5,C8
,C0
,0), 0),
4570 SR_CORE ("csrptr_el1", CPENC (2,0,C8
,C0
,1), 0),
4571 SR_CORE ("csrptr_el12", CPENC (2,5,C8
,C0
,1), 0),
4572 SR_CORE ("csrptridx_el1", CPENC (2,0,C8
,C0
,3), F_REG_READ
),
4573 SR_CORE ("csrcr_el2", CPENC (2,4,C8
,C0
,0), 0),
4574 SR_CORE ("csrptr_el2", CPENC (2,4,C8
,C0
,1), 0),
4575 SR_CORE ("csrptridx_el2", CPENC (2,4,C8
,C0
,3), F_REG_READ
),
4577 SR_CORE ("brbcr_el1", CPENC (2,1,C9
,C0
,0), 0),
4578 SR_CORE ("brbcr_el12", CPENC (2,5,C9
,C0
,0), 0),
4579 SR_CORE ("brbfcr_el1", CPENC (2,1,C9
,C0
,1), 0),
4580 SR_CORE ("brbts_el1", CPENC (2,1,C9
,C0
,2), 0),
4581 SR_CORE ("brbinfinj_el1", CPENC (2,1,C9
,C1
,0), 0),
4582 SR_CORE ("brbsrcinj_el1", CPENC (2,1,C9
,C1
,1), 0),
4583 SR_CORE ("brbtgtinj_el1", CPENC (2,1,C9
,C1
,2), 0),
4584 SR_CORE ("brbidr0_el1", CPENC (2,1,C9
,C2
,0), F_REG_READ
),
4585 SR_CORE ("brbcr_el2", CPENC (2,4,C9
,C0
,0), 0),
4586 SR_CORE ("brbsrc0_el1", CPENC (2,1,C8
,C0
,1), F_REG_READ
),
4587 SR_CORE ("brbsrc1_el1", CPENC (2,1,C8
,C1
,1), F_REG_READ
),
4588 SR_CORE ("brbsrc2_el1", CPENC (2,1,C8
,C2
,1), F_REG_READ
),
4589 SR_CORE ("brbsrc3_el1", CPENC (2,1,C8
,C3
,1), F_REG_READ
),
4590 SR_CORE ("brbsrc4_el1", CPENC (2,1,C8
,C4
,1), F_REG_READ
),
4591 SR_CORE ("brbsrc5_el1", CPENC (2,1,C8
,C5
,1), F_REG_READ
),
4592 SR_CORE ("brbsrc6_el1", CPENC (2,1,C8
,C6
,1), F_REG_READ
),
4593 SR_CORE ("brbsrc7_el1", CPENC (2,1,C8
,C7
,1), F_REG_READ
),
4594 SR_CORE ("brbsrc8_el1", CPENC (2,1,C8
,C8
,1), F_REG_READ
),
4595 SR_CORE ("brbsrc9_el1", CPENC (2,1,C8
,C9
,1), F_REG_READ
),
4596 SR_CORE ("brbsrc10_el1", CPENC (2,1,C8
,C10
,1), F_REG_READ
),
4597 SR_CORE ("brbsrc11_el1", CPENC (2,1,C8
,C11
,1), F_REG_READ
),
4598 SR_CORE ("brbsrc12_el1", CPENC (2,1,C8
,C12
,1), F_REG_READ
),
4599 SR_CORE ("brbsrc13_el1", CPENC (2,1,C8
,C13
,1), F_REG_READ
),
4600 SR_CORE ("brbsrc14_el1", CPENC (2,1,C8
,C14
,1), F_REG_READ
),
4601 SR_CORE ("brbsrc15_el1", CPENC (2,1,C8
,C15
,1), F_REG_READ
),
4602 SR_CORE ("brbsrc16_el1", CPENC (2,1,C8
,C0
,5), F_REG_READ
),
4603 SR_CORE ("brbsrc17_el1", CPENC (2,1,C8
,C1
,5), F_REG_READ
),
4604 SR_CORE ("brbsrc18_el1", CPENC (2,1,C8
,C2
,5), F_REG_READ
),
4605 SR_CORE ("brbsrc19_el1", CPENC (2,1,C8
,C3
,5), F_REG_READ
),
4606 SR_CORE ("brbsrc20_el1", CPENC (2,1,C8
,C4
,5), F_REG_READ
),
4607 SR_CORE ("brbsrc21_el1", CPENC (2,1,C8
,C5
,5), F_REG_READ
),
4608 SR_CORE ("brbsrc22_el1", CPENC (2,1,C8
,C6
,5), F_REG_READ
),
4609 SR_CORE ("brbsrc23_el1", CPENC (2,1,C8
,C7
,5), F_REG_READ
),
4610 SR_CORE ("brbsrc24_el1", CPENC (2,1,C8
,C8
,5), F_REG_READ
),
4611 SR_CORE ("brbsrc25_el1", CPENC (2,1,C8
,C9
,5), F_REG_READ
),
4612 SR_CORE ("brbsrc26_el1", CPENC (2,1,C8
,C10
,5), F_REG_READ
),
4613 SR_CORE ("brbsrc27_el1", CPENC (2,1,C8
,C11
,5), F_REG_READ
),
4614 SR_CORE ("brbsrc28_el1", CPENC (2,1,C8
,C12
,5), F_REG_READ
),
4615 SR_CORE ("brbsrc29_el1", CPENC (2,1,C8
,C13
,5), F_REG_READ
),
4616 SR_CORE ("brbsrc30_el1", CPENC (2,1,C8
,C14
,5), F_REG_READ
),
4617 SR_CORE ("brbsrc31_el1", CPENC (2,1,C8
,C15
,5), F_REG_READ
),
4618 SR_CORE ("brbtgt0_el1", CPENC (2,1,C8
,C0
,2), F_REG_READ
),
4619 SR_CORE ("brbtgt1_el1", CPENC (2,1,C8
,C1
,2), F_REG_READ
),
4620 SR_CORE ("brbtgt2_el1", CPENC (2,1,C8
,C2
,2), F_REG_READ
),
4621 SR_CORE ("brbtgt3_el1", CPENC (2,1,C8
,C3
,2), F_REG_READ
),
4622 SR_CORE ("brbtgt4_el1", CPENC (2,1,C8
,C4
,2), F_REG_READ
),
4623 SR_CORE ("brbtgt5_el1", CPENC (2,1,C8
,C5
,2), F_REG_READ
),
4624 SR_CORE ("brbtgt6_el1", CPENC (2,1,C8
,C6
,2), F_REG_READ
),
4625 SR_CORE ("brbtgt7_el1", CPENC (2,1,C8
,C7
,2), F_REG_READ
),
4626 SR_CORE ("brbtgt8_el1", CPENC (2,1,C8
,C8
,2), F_REG_READ
),
4627 SR_CORE ("brbtgt9_el1", CPENC (2,1,C8
,C9
,2), F_REG_READ
),
4628 SR_CORE ("brbtgt10_el1", CPENC (2,1,C8
,C10
,2), F_REG_READ
),
4629 SR_CORE ("brbtgt11_el1", CPENC (2,1,C8
,C11
,2), F_REG_READ
),
4630 SR_CORE ("brbtgt12_el1", CPENC (2,1,C8
,C12
,2), F_REG_READ
),
4631 SR_CORE ("brbtgt13_el1", CPENC (2,1,C8
,C13
,2), F_REG_READ
),
4632 SR_CORE ("brbtgt14_el1", CPENC (2,1,C8
,C14
,2), F_REG_READ
),
4633 SR_CORE ("brbtgt15_el1", CPENC (2,1,C8
,C15
,2), F_REG_READ
),
4634 SR_CORE ("brbtgt16_el1", CPENC (2,1,C8
,C0
,6), F_REG_READ
),
4635 SR_CORE ("brbtgt17_el1", CPENC (2,1,C8
,C1
,6), F_REG_READ
),
4636 SR_CORE ("brbtgt18_el1", CPENC (2,1,C8
,C2
,6), F_REG_READ
),
4637 SR_CORE ("brbtgt19_el1", CPENC (2,1,C8
,C3
,6), F_REG_READ
),
4638 SR_CORE ("brbtgt20_el1", CPENC (2,1,C8
,C4
,6), F_REG_READ
),
4639 SR_CORE ("brbtgt21_el1", CPENC (2,1,C8
,C5
,6), F_REG_READ
),
4640 SR_CORE ("brbtgt22_el1", CPENC (2,1,C8
,C6
,6), F_REG_READ
),
4641 SR_CORE ("brbtgt23_el1", CPENC (2,1,C8
,C7
,6), F_REG_READ
),
4642 SR_CORE ("brbtgt24_el1", CPENC (2,1,C8
,C8
,6), F_REG_READ
),
4643 SR_CORE ("brbtgt25_el1", CPENC (2,1,C8
,C9
,6), F_REG_READ
),
4644 SR_CORE ("brbtgt26_el1", CPENC (2,1,C8
,C10
,6), F_REG_READ
),
4645 SR_CORE ("brbtgt27_el1", CPENC (2,1,C8
,C11
,6), F_REG_READ
),
4646 SR_CORE ("brbtgt28_el1", CPENC (2,1,C8
,C12
,6), F_REG_READ
),
4647 SR_CORE ("brbtgt29_el1", CPENC (2,1,C8
,C13
,6), F_REG_READ
),
4648 SR_CORE ("brbtgt30_el1", CPENC (2,1,C8
,C14
,6), F_REG_READ
),
4649 SR_CORE ("brbtgt31_el1", CPENC (2,1,C8
,C15
,6), F_REG_READ
),
4650 SR_CORE ("brbinf0_el1", CPENC (2,1,C8
,C0
,0), F_REG_READ
),
4651 SR_CORE ("brbinf1_el1", CPENC (2,1,C8
,C1
,0), F_REG_READ
),
4652 SR_CORE ("brbinf2_el1", CPENC (2,1,C8
,C2
,0), F_REG_READ
),
4653 SR_CORE ("brbinf3_el1", CPENC (2,1,C8
,C3
,0), F_REG_READ
),
4654 SR_CORE ("brbinf4_el1", CPENC (2,1,C8
,C4
,0), F_REG_READ
),
4655 SR_CORE ("brbinf5_el1", CPENC (2,1,C8
,C5
,0), F_REG_READ
),
4656 SR_CORE ("brbinf6_el1", CPENC (2,1,C8
,C6
,0), F_REG_READ
),
4657 SR_CORE ("brbinf7_el1", CPENC (2,1,C8
,C7
,0), F_REG_READ
),
4658 SR_CORE ("brbinf8_el1", CPENC (2,1,C8
,C8
,0), F_REG_READ
),
4659 SR_CORE ("brbinf9_el1", CPENC (2,1,C8
,C9
,0), F_REG_READ
),
4660 SR_CORE ("brbinf10_el1", CPENC (2,1,C8
,C10
,0), F_REG_READ
),
4661 SR_CORE ("brbinf11_el1", CPENC (2,1,C8
,C11
,0), F_REG_READ
),
4662 SR_CORE ("brbinf12_el1", CPENC (2,1,C8
,C12
,0), F_REG_READ
),
4663 SR_CORE ("brbinf13_el1", CPENC (2,1,C8
,C13
,0), F_REG_READ
),
4664 SR_CORE ("brbinf14_el1", CPENC (2,1,C8
,C14
,0), F_REG_READ
),
4665 SR_CORE ("brbinf15_el1", CPENC (2,1,C8
,C15
,0), F_REG_READ
),
4666 SR_CORE ("brbinf16_el1", CPENC (2,1,C8
,C0
,4), F_REG_READ
),
4667 SR_CORE ("brbinf17_el1", CPENC (2,1,C8
,C1
,4), F_REG_READ
),
4668 SR_CORE ("brbinf18_el1", CPENC (2,1,C8
,C2
,4), F_REG_READ
),
4669 SR_CORE ("brbinf19_el1", CPENC (2,1,C8
,C3
,4), F_REG_READ
),
4670 SR_CORE ("brbinf20_el1", CPENC (2,1,C8
,C4
,4), F_REG_READ
),
4671 SR_CORE ("brbinf21_el1", CPENC (2,1,C8
,C5
,4), F_REG_READ
),
4672 SR_CORE ("brbinf22_el1", CPENC (2,1,C8
,C6
,4), F_REG_READ
),
4673 SR_CORE ("brbinf23_el1", CPENC (2,1,C8
,C7
,4), F_REG_READ
),
4674 SR_CORE ("brbinf24_el1", CPENC (2,1,C8
,C8
,4), F_REG_READ
),
4675 SR_CORE ("brbinf25_el1", CPENC (2,1,C8
,C9
,4), F_REG_READ
),
4676 SR_CORE ("brbinf26_el1", CPENC (2,1,C8
,C10
,4), F_REG_READ
),
4677 SR_CORE ("brbinf27_el1", CPENC (2,1,C8
,C11
,4), F_REG_READ
),
4678 SR_CORE ("brbinf28_el1", CPENC (2,1,C8
,C12
,4), F_REG_READ
),
4679 SR_CORE ("brbinf29_el1", CPENC (2,1,C8
,C13
,4), F_REG_READ
),
4680 SR_CORE ("brbinf30_el1", CPENC (2,1,C8
,C14
,4), F_REG_READ
),
4681 SR_CORE ("brbinf31_el1", CPENC (2,1,C8
,C15
,4), F_REG_READ
),
  SR_CORE ("accdata_el1", CPENC (3,0,C13,C0,5), 0),

  { 0, CPENC (0,0,0,0,0), 0, 0 }
};

bfd_boolean
aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
{
  return (reg_flags & F_DEPRECATED) != 0;
}

/* The CPENC below is fairly misleading, the fields here are not in CPENC
   form.  They are in op2op1 form.  The fields are encoded by ins_pstatefield,
   which just shifts the value by the width of the fields in a loop.  So if
   you CPENC them only the first value will be set, the rest are masked out
   to 0.  As an example, op2 = 3 and op1 = 2: CPENC would produce a value of
   0b110000000001000000 (0x30040), while what you want is
   0b011010 (0x1A).  */
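/* Illustrative sketch of that layout (not an entry in the table below): the
   two 3-bit fields are simply packed next to each other, so the example above
   packs to (3 << 3) | 2 == 0b011010 == 0x1a, which is also the value used for
   "dit" below, whereas CPENC would scatter the two values across the full
   op0:op1:CRn:CRm:op2 bit positions.  */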
const aarch64_sys_reg aarch64_pstatefields [] =
{
  SR_CORE ("spsel",   0x05, 0),
  SR_CORE ("daifset", 0x1e, 0),
  SR_CORE ("daifclr", 0x1f, 0),
  SR_PAN  ("pan",     0x04, 0),
  SR_V8_2 ("uao",     0x03, 0),
  SR_SSBS ("ssbs",    0x19, 0),
  SR_V8_4 ("dit",     0x1a, 0),
  SR_MEMTAG ("tco",   0x1c, 0),
  { 0, CPENC (0,0,0,0,0), 0, 0 },
};

bfd_boolean
aarch64_pstatefield_supported_p (const aarch64_feature_set features,
				 const aarch64_sys_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
}

const aarch64_sys_ins_reg aarch64_sys_regs_ic [] =
{
    { "ialluis", CPENS(0,C7,C1,0), 0 },
    { "iallu",   CPENS(0,C7,C5,0), 0 },
    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
    { 0, CPENS(0,0,0,0), 0 }
};

const aarch64_sys_ins_reg aarch64_sys_regs_dc [] =
{
    { "zva",     CPENS (3, C7, C4, 1),  F_HASXT },
    { "gva",     CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
    { "gzva",    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
    { "ivac",    CPENS (0, C7, C6, 1),  F_HASXT },
    { "igvac",   CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
    { "igsw",    CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
    { "isw",     CPENS (0, C7, C6, 2),  F_HASXT },
    { "igdvac",  CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
    { "igdsw",   CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
    { "cvac",    CPENS (3, C7, C10, 1), F_HASXT },
    { "cgvac",   CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
    { "cgdvac",  CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
    { "csw",     CPENS (0, C7, C10, 2), F_HASXT },
    { "cgsw",    CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
    { "cgdsw",   CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
    { "cvau",    CPENS (3, C7, C11, 1), F_HASXT },
    { "cvap",    CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
    { "cgvap",   CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
    { "cgdvap",  CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
    { "cvadp",   CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
    { "cgvadp",  CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
    { "cgdvadp", CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
    { "civac",   CPENS (3, C7, C14, 1), F_HASXT },
    { "cigvac",  CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
    { "cigdvac", CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
    { "cisw",    CPENS (0, C7, C14, 2), F_HASXT },
    { "cigsw",   CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
    { "cigdsw",  CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
    { 0, CPENS(0,0,0,0), 0 }
};

const aarch64_sys_ins_reg aarch64_sys_regs_at [] =
{
    { "s1e1r",  CPENS (0, C7, C8, 0), F_HASXT },
    { "s1e1w",  CPENS (0, C7, C8, 1), F_HASXT },
    { "s1e0r",  CPENS (0, C7, C8, 2), F_HASXT },
    { "s1e0w",  CPENS (0, C7, C8, 3), F_HASXT },
    { "s12e1r", CPENS (4, C7, C8, 4), F_HASXT },
    { "s12e1w", CPENS (4, C7, C8, 5), F_HASXT },
    { "s12e0r", CPENS (4, C7, C8, 6), F_HASXT },
    { "s12e0w", CPENS (4, C7, C8, 7), F_HASXT },
    { "s1e2r",  CPENS (4, C7, C8, 0), F_HASXT },
    { "s1e2w",  CPENS (4, C7, C8, 1), F_HASXT },
    { "s1e3r",  CPENS (6, C7, C8, 0), F_HASXT },
    { "s1e3w",  CPENS (6, C7, C8, 1), F_HASXT },
    { "s1e1rp", CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
    { "s1e1wp", CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
    { 0, CPENS(0,0,0,0), 0 }
};

4784 const aarch64_sys_ins_reg aarch64_sys_regs_tlbi
[] =
4786 { "vmalle1", CPENS(0,C8
,C7
,0), 0 },
4787 { "vae1", CPENS (0, C8
, C7
, 1), F_HASXT
},
4788 { "aside1", CPENS (0, C8
, C7
, 2), F_HASXT
},
4789 { "vaae1", CPENS (0, C8
, C7
, 3), F_HASXT
},
4790 { "vmalle1is", CPENS(0,C8
,C3
,0), 0 },
4791 { "vae1is", CPENS (0, C8
, C3
, 1), F_HASXT
},
4792 { "aside1is", CPENS (0, C8
, C3
, 2), F_HASXT
},
4793 { "vaae1is", CPENS (0, C8
, C3
, 3), F_HASXT
},
4794 { "ipas2e1is", CPENS (4, C8
, C0
, 1), F_HASXT
},
4795 { "ipas2le1is",CPENS (4, C8
, C0
, 5), F_HASXT
},
4796 { "ipas2e1", CPENS (4, C8
, C4
, 1), F_HASXT
},
4797 { "ipas2le1", CPENS (4, C8
, C4
, 5), F_HASXT
},
4798 { "vae2", CPENS (4, C8
, C7
, 1), F_HASXT
},
4799 { "vae2is", CPENS (4, C8
, C3
, 1), F_HASXT
},
4800 { "vmalls12e1",CPENS(4,C8
,C7
,6), 0 },
4801 { "vmalls12e1is",CPENS(4,C8
,C3
,6), 0 },
4802 { "vae3", CPENS (6, C8
, C7
, 1), F_HASXT
},
4803 { "vae3is", CPENS (6, C8
, C3
, 1), F_HASXT
},
4804 { "alle2", CPENS(4,C8
,C7
,0), 0 },
4805 { "alle2is", CPENS(4,C8
,C3
,0), 0 },
4806 { "alle1", CPENS(4,C8
,C7
,4), 0 },
4807 { "alle1is", CPENS(4,C8
,C3
,4), 0 },
4808 { "alle3", CPENS(6,C8
,C7
,0), 0 },
4809 { "alle3is", CPENS(6,C8
,C3
,0), 0 },
4810 { "vale1is", CPENS (0, C8
, C3
, 5), F_HASXT
},
4811 { "vale2is", CPENS (4, C8
, C3
, 5), F_HASXT
},
4812 { "vale3is", CPENS (6, C8
, C3
, 5), F_HASXT
},
4813 { "vaale1is", CPENS (0, C8
, C3
, 7), F_HASXT
},
4814 { "vale1", CPENS (0, C8
, C7
, 5), F_HASXT
},
4815 { "vale2", CPENS (4, C8
, C7
, 5), F_HASXT
},
4816 { "vale3", CPENS (6, C8
, C7
, 5), F_HASXT
},
4817 { "vaale1", CPENS (0, C8
, C7
, 7), F_HASXT
},
4819 { "vmalle1os", CPENS (0, C8
, C1
, 0), F_ARCHEXT
},
4820 { "vae1os", CPENS (0, C8
, C1
, 1), F_HASXT
| F_ARCHEXT
},
4821 { "aside1os", CPENS (0, C8
, C1
, 2), F_HASXT
| F_ARCHEXT
},
4822 { "vaae1os", CPENS (0, C8
, C1
, 3), F_HASXT
| F_ARCHEXT
},
4823 { "vale1os", CPENS (0, C8
, C1
, 5), F_HASXT
| F_ARCHEXT
},
4824 { "vaale1os", CPENS (0, C8
, C1
, 7), F_HASXT
| F_ARCHEXT
},
4825 { "ipas2e1os", CPENS (4, C8
, C4
, 0), F_HASXT
| F_ARCHEXT
},
4826 { "ipas2le1os", CPENS (4, C8
, C4
, 4), F_HASXT
| F_ARCHEXT
},
4827 { "vae2os", CPENS (4, C8
, C1
, 1), F_HASXT
| F_ARCHEXT
},
4828 { "vale2os", CPENS (4, C8
, C1
, 5), F_HASXT
| F_ARCHEXT
},
4829 { "vmalls12e1os", CPENS (4, C8
, C1
, 6), F_ARCHEXT
},
4830 { "vae3os", CPENS (6, C8
, C1
, 1), F_HASXT
| F_ARCHEXT
},
4831 { "vale3os", CPENS (6, C8
, C1
, 5), F_HASXT
| F_ARCHEXT
},
4832 { "alle2os", CPENS (4, C8
, C1
, 0), F_ARCHEXT
},
4833 { "alle1os", CPENS (4, C8
, C1
, 4), F_ARCHEXT
},
4834 { "alle3os", CPENS (6, C8
, C1
, 0), F_ARCHEXT
},
4836 { "rvae1", CPENS (0, C8
, C6
, 1), F_HASXT
| F_ARCHEXT
},
4837 { "rvaae1", CPENS (0, C8
, C6
, 3), F_HASXT
| F_ARCHEXT
},
4838 { "rvale1", CPENS (0, C8
, C6
, 5), F_HASXT
| F_ARCHEXT
},
4839 { "rvaale1", CPENS (0, C8
, C6
, 7), F_HASXT
| F_ARCHEXT
},
4840 { "rvae1is", CPENS (0, C8
, C2
, 1), F_HASXT
| F_ARCHEXT
},
4841 { "rvaae1is", CPENS (0, C8
, C2
, 3), F_HASXT
| F_ARCHEXT
},
4842 { "rvale1is", CPENS (0, C8
, C2
, 5), F_HASXT
| F_ARCHEXT
},
4843 { "rvaale1is", CPENS (0, C8
, C2
, 7), F_HASXT
| F_ARCHEXT
},
4844 { "rvae1os", CPENS (0, C8
, C5
, 1), F_HASXT
| F_ARCHEXT
},
4845 { "rvaae1os", CPENS (0, C8
, C5
, 3), F_HASXT
| F_ARCHEXT
},
4846 { "rvale1os", CPENS (0, C8
, C5
, 5), F_HASXT
| F_ARCHEXT
},
4847 { "rvaale1os", CPENS (0, C8
, C5
, 7), F_HASXT
| F_ARCHEXT
},
4848 { "ripas2e1is", CPENS (4, C8
, C0
, 2), F_HASXT
| F_ARCHEXT
},
4849 { "ripas2le1is",CPENS (4, C8
, C0
, 6), F_HASXT
| F_ARCHEXT
},
4850 { "ripas2e1", CPENS (4, C8
, C4
, 2), F_HASXT
| F_ARCHEXT
},
4851 { "ripas2le1", CPENS (4, C8
, C4
, 6), F_HASXT
| F_ARCHEXT
},
4852 { "ripas2e1os", CPENS (4, C8
, C4
, 3), F_HASXT
| F_ARCHEXT
},
4853 { "ripas2le1os",CPENS (4, C8
, C4
, 7), F_HASXT
| F_ARCHEXT
},
4854 { "rvae2", CPENS (4, C8
, C6
, 1), F_HASXT
| F_ARCHEXT
},
4855 { "rvale2", CPENS (4, C8
, C6
, 5), F_HASXT
| F_ARCHEXT
},
4856 { "rvae2is", CPENS (4, C8
, C2
, 1), F_HASXT
| F_ARCHEXT
},
4857 { "rvale2is", CPENS (4, C8
, C2
, 5), F_HASXT
| F_ARCHEXT
},
4858 { "rvae2os", CPENS (4, C8
, C5
, 1), F_HASXT
| F_ARCHEXT
},
4859 { "rvale2os", CPENS (4, C8
, C5
, 5), F_HASXT
| F_ARCHEXT
},
4860 { "rvae3", CPENS (6, C8
, C6
, 1), F_HASXT
| F_ARCHEXT
},
4861 { "rvale3", CPENS (6, C8
, C6
, 5), F_HASXT
| F_ARCHEXT
},
4862 { "rvae3is", CPENS (6, C8
, C2
, 1), F_HASXT
| F_ARCHEXT
},
4863 { "rvale3is", CPENS (6, C8
, C2
, 5), F_HASXT
| F_ARCHEXT
},
4864 { "rvae3os", CPENS (6, C8
, C5
, 1), F_HASXT
| F_ARCHEXT
},
4865 { "rvale3os", CPENS (6, C8
, C5
, 5), F_HASXT
| F_ARCHEXT
},
4867 { 0, CPENS(0,0,0,0), 0 }
const aarch64_sys_ins_reg aarch64_sys_regs_sr [] =
{
    /* RCTX is somewhat unique in a way that it has different values
       (op2) based on the instruction in which it is used (cfp/dvp/cpp).
       Thus op2 is masked out and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
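    /* Illustrative note (based on the Armv8.5-A PredRes definitions, not on
       anything encoded here): "cfp rctx, Xt" is SYS #3, C7, C3, #4, Xt,
       "dvp rctx, Xt" uses op2 == 5 and "cpp rctx, Xt" uses op2 == 7; those
       op2 values are carried by the CFP/DVP/CPP entries in
       aarch64_opcode_table rather than by this table.  */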
    { "rctx", CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE }, /* WO */

    { 0, CPENS(0,0,0,0), 0 }
};

bfd_boolean
aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
{
  return (sys_ins_reg->flags & F_HASXT) != 0;
}

bfd_boolean
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const char *reg_name,
				 aarch64_insn reg_value,
				 uint32_t reg_flags,
				 aarch64_feature_set reg_features)
{
  /* Armv8-R has no EL3.  */
  if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_R))
    {
      const char *suffix = strrchr (reg_name, '_');
      if (suffix && !strcmp (suffix, "_el3"))
	return FALSE;
    }

  if (!(reg_flags & F_ARCHEXT))
    return TRUE;

  if (reg_features
      && AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
    return TRUE;

  /* ARMv8.4 TLB instructions.  */
4910 if ((reg_value
== CPENS (0, C8
, C1
, 0)
4911 || reg_value
== CPENS (0, C8
, C1
, 1)
4912 || reg_value
== CPENS (0, C8
, C1
, 2)
4913 || reg_value
== CPENS (0, C8
, C1
, 3)
4914 || reg_value
== CPENS (0, C8
, C1
, 5)
4915 || reg_value
== CPENS (0, C8
, C1
, 7)
4916 || reg_value
== CPENS (4, C8
, C4
, 0)
4917 || reg_value
== CPENS (4, C8
, C4
, 4)
4918 || reg_value
== CPENS (4, C8
, C1
, 1)
4919 || reg_value
== CPENS (4, C8
, C1
, 5)
4920 || reg_value
== CPENS (4, C8
, C1
, 6)
4921 || reg_value
== CPENS (6, C8
, C1
, 1)
4922 || reg_value
== CPENS (6, C8
, C1
, 5)
4923 || reg_value
== CPENS (4, C8
, C1
, 0)
4924 || reg_value
== CPENS (4, C8
, C1
, 4)
4925 || reg_value
== CPENS (6, C8
, C1
, 0)
4926 || reg_value
== CPENS (0, C8
, C6
, 1)
4927 || reg_value
== CPENS (0, C8
, C6
, 3)
4928 || reg_value
== CPENS (0, C8
, C6
, 5)
4929 || reg_value
== CPENS (0, C8
, C6
, 7)
4930 || reg_value
== CPENS (0, C8
, C2
, 1)
4931 || reg_value
== CPENS (0, C8
, C2
, 3)
4932 || reg_value
== CPENS (0, C8
, C2
, 5)
4933 || reg_value
== CPENS (0, C8
, C2
, 7)
4934 || reg_value
== CPENS (0, C8
, C5
, 1)
4935 || reg_value
== CPENS (0, C8
, C5
, 3)
4936 || reg_value
== CPENS (0, C8
, C5
, 5)
4937 || reg_value
== CPENS (0, C8
, C5
, 7)
4938 || reg_value
== CPENS (4, C8
, C0
, 2)
4939 || reg_value
== CPENS (4, C8
, C0
, 6)
4940 || reg_value
== CPENS (4, C8
, C4
, 2)
4941 || reg_value
== CPENS (4, C8
, C4
, 6)
4942 || reg_value
== CPENS (4, C8
, C4
, 3)
4943 || reg_value
== CPENS (4, C8
, C4
, 7)
4944 || reg_value
== CPENS (4, C8
, C6
, 1)
4945 || reg_value
== CPENS (4, C8
, C6
, 5)
4946 || reg_value
== CPENS (4, C8
, C2
, 1)
4947 || reg_value
== CPENS (4, C8
, C2
, 5)
4948 || reg_value
== CPENS (4, C8
, C5
, 1)
4949 || reg_value
== CPENS (4, C8
, C5
, 5)
4950 || reg_value
== CPENS (6, C8
, C6
, 1)
4951 || reg_value
== CPENS (6, C8
, C6
, 5)
4952 || reg_value
== CPENS (6, C8
, C2
, 1)
4953 || reg_value
== CPENS (6, C8
, C2
, 5)
4954 || reg_value
== CPENS (6, C8
, C5
, 1)
4955 || reg_value
== CPENS (6, C8
, C5
, 5))
4956 && AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_4
))
4959 /* DC CVAP. Values are from aarch64_sys_regs_dc. */
4960 if (reg_value
== CPENS (3, C7
, C12
, 1)
4961 && AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4964 /* DC CVADP. Values are from aarch64_sys_regs_dc. */
4965 if (reg_value
== CPENS (3, C7
, C13
, 1)
4966 && AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_CVADP
))
4969 /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension. */
4970 if ((reg_value
== CPENS (0, C7
, C6
, 3)
4971 || reg_value
== CPENS (0, C7
, C6
, 4)
4972 || reg_value
== CPENS (0, C7
, C10
, 4)
4973 || reg_value
== CPENS (0, C7
, C14
, 4)
4974 || reg_value
== CPENS (3, C7
, C10
, 3)
4975 || reg_value
== CPENS (3, C7
, C12
, 3)
4976 || reg_value
== CPENS (3, C7
, C13
, 3)
4977 || reg_value
== CPENS (3, C7
, C14
, 3)
4978 || reg_value
== CPENS (3, C7
, C4
, 3)
4979 || reg_value
== CPENS (0, C7
, C6
, 5)
4980 || reg_value
== CPENS (0, C7
, C6
, 6)
4981 || reg_value
== CPENS (0, C7
, C10
, 6)
4982 || reg_value
== CPENS (0, C7
, C14
, 6)
4983 || reg_value
== CPENS (3, C7
, C10
, 5)
4984 || reg_value
== CPENS (3, C7
, C12
, 5)
4985 || reg_value
== CPENS (3, C7
, C13
, 5)
4986 || reg_value
== CPENS (3, C7
, C14
, 5)
4987 || reg_value
== CPENS (3, C7
, C4
, 4))
4988 && AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_MEMTAG
))
4991 /* AT S1E1RP, AT S1E1WP. Values are from aarch64_sys_regs_at. */
4992 if ((reg_value
== CPENS (0, C7
, C9
, 0)
4993 || reg_value
== CPENS (0, C7
, C9
, 1))
4994 && AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_V8_2
))
4997 /* CFP/DVP/CPP RCTX : Value are from aarch64_sys_regs_sr. */
4998 if (reg_value
== CPENS (3, C7
, C3
, 0)
4999 && AARCH64_CPU_HAS_FEATURE (features
, AARCH64_FEATURE_PREDRES
))
#define BIT(INSN,BT) (((INSN) >> (BT)) & 1)
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
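/* For example (illustrative only): BITS (insn, 9, 5) used in verify_ldpsw
   below evaluates to ((insn) >> 5) & 0x1f, i.e. the 5-bit Rn field of the
   load/store pair encoding.  */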
static enum err_type
verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
	      const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
	      bfd_boolean encoding ATTRIBUTE_UNUSED,
	      aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
	      aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
{
  int t  = BITS (insn, 4, 0);
  int n  = BITS (insn, 9, 5);
  int t2 = BITS (insn, 14, 10);

  if (BIT (insn, 23))
    {
      /* Write back enabled.  */
      if ((t == n || t2 == n) && n != 31)
	return ERR_UND;
    }

  if (BIT (insn, 22))
    {
      /* Load.  */
      if (t == t2)
	return ERR_UND;
    }

  return ERR_OK;
}

/* Verifier for vector by element 3 operands functions where the
   condition `if sz:L == 11 then UNDEFINED` holds.  */

static enum err_type
verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
		bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
		aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
		aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
{
  const aarch64_insn undef_pattern = 0x3;
  aarch64_insn value;

  assert (inst->opcode);
  assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
  value = encoding ? inst->value : insn;
  assert (value);

  if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
    return ERR_UND;

  return ERR_OK;
}

/* Initialize an instruction sequence insn_sequence with the instruction INST.
   If INST is NULL the given insn_sequence is cleared and the sequence is left
   uninitialised.  */

void
init_insn_sequence (const struct aarch64_inst *inst,
		    aarch64_instr_sequence *insn_sequence)
{
  int num_req_entries = 0;
  insn_sequence->next_insn = 0;
  insn_sequence->num_insns = num_req_entries;
  if (insn_sequence->instr)
    XDELETE (insn_sequence->instr);
  insn_sequence->instr = NULL;

  if (inst)
    {
      insn_sequence->instr = XNEW (aarch64_inst);
      memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
    }

  /* Handle all the cases here.  May need to think of something smarter than
     a giant if/else chain if this grows.  At that time, a lookup table may be
     best.  */
  if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
    num_req_entries = 1;

  if (insn_sequence->current_insns)
    XDELETEVEC (insn_sequence->current_insns);
  insn_sequence->current_insns = NULL;

  if (num_req_entries != 0)
    {
      size_t size = num_req_entries * sizeof (aarch64_inst);
      insn_sequence->current_insns
	= (aarch64_inst **) XNEWVEC (aarch64_inst, num_req_entries);
      memset (insn_sequence->current_insns, 0, size);
    }
}

/* This function verifies that the instruction INST adheres to its specified
   constraints.  If it does then ERR_OK is returned, if not then ERR_VFI is
   returned and MISMATCH_DETAIL contains the reason why verification failed.

   The function is called both during assembly and disassembly.  If assembling
   then ENCODING will be TRUE, else FALSE.  If disassembling PC will be set
   and will contain the PC of the current instruction w.r.t. the section.

   If ENCODING and PC=0 then you are at the start of a section.  The
   constraints are verified against the given state insn_sequence which is
   updated as it transitions through the verification.  */

enum err_type
verify_constraints (const struct aarch64_inst *inst,
		    const aarch64_insn insn ATTRIBUTE_UNUSED,
		    bfd_vma pc,
		    bfd_boolean encoding,
		    aarch64_operand_error *mismatch_detail,
		    aarch64_instr_sequence *insn_sequence)
{
  assert (inst);
  assert (inst->opcode);

  const struct aarch64_opcode *opcode = inst->opcode;
  if (!opcode->constraints && !insn_sequence->instr)
    return ERR_OK;

  assert (insn_sequence);

  enum err_type res = ERR_OK;

  /* This instruction puts a constraint on the insn_sequence.  */
  if (opcode->flags & F_SCAN)
    {
      if (insn_sequence->instr)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("instruction opens new dependency "
				     "sequence without ending previous one");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = TRUE;
	  res = ERR_VFI;
	}

      init_insn_sequence (inst, insn_sequence);
      return res;
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode *inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with an open sequence then we
	 haven't closed a previous one that we should have.  */
      if (!encoding && pc == 0)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = TRUE;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

5183 /* Validate C_SCAN_MOVPRFX constraints. Move this to a lookup table. */
5184 if (inst_opcode
->constraints
& C_SCAN_MOVPRFX
)
5186 /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
5187 instruction for better error messages. */
5188 if (!opcode
->avariant
5189 || !(*opcode
->avariant
&
5190 (AARCH64_FEATURE_SVE
| AARCH64_FEATURE_SVE2
)))
5192 mismatch_detail
->kind
= AARCH64_OPDE_SYNTAX_ERROR
;
5193 mismatch_detail
->error
= _("SVE instruction expected after "
5195 mismatch_detail
->index
= -1;
5196 mismatch_detail
->non_fatal
= TRUE
;
          /* Check to see if the MOVPRFX SVE instruction is followed by an SVE
             instruction that is allowed to be used with a MOVPRFX.  */
          if (!(opcode->constraints & C_SCAN_MOVPRFX))
            {
              mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
              mismatch_detail->error = _("SVE `movprfx' compatible instruction "
                                         "expected");
              mismatch_detail->index = -1;
              mismatch_detail->non_fatal = TRUE;
              res = ERR_VFI;
              goto done;
            }

          /* Next check for usage of the predicate register.  */
          aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
          aarch64_opnd_info blk_pred, inst_pred;
          memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
          memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
          bfd_boolean predicated = FALSE;
          assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

          /* Determine if the movprfx instruction used is predicated or not.  */
          if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
            {
              predicated = TRUE;
              blk_pred = insn_sequence->instr->operands[1];
            }

          unsigned char max_elem_size = 0;
          unsigned char current_elem_size;
          int num_op_used = 0, last_op_usage = 0;
          int i, inst_pred_idx = -1;
          int num_ops = aarch64_num_of_operands (opcode);
          for (i = 0; i < num_ops; i++)
            {
              aarch64_opnd_info inst_op = inst->operands[i];
              switch (inst_op.type)
                {
                  case AARCH64_OPND_SVE_Zd:
                  case AARCH64_OPND_SVE_Zm_5:
                  case AARCH64_OPND_SVE_Zm_16:
                  case AARCH64_OPND_SVE_Zn:
                  case AARCH64_OPND_SVE_Zt:
                  case AARCH64_OPND_SVE_Vm:
                  case AARCH64_OPND_SVE_Vn:
                  case AARCH64_OPND_Va:
                  case AARCH64_OPND_Vn:
                  case AARCH64_OPND_Vm:
                  case AARCH64_OPND_Sn:
                  case AARCH64_OPND_Sm:
                    if (inst_op.reg.regno == blk_dest.reg.regno)
                      {
                        num_op_used++;
                        last_op_usage = i;
                      }
                    current_elem_size
                      = aarch64_get_qualifier_esize (inst_op.qualifier);
                    if (current_elem_size > max_elem_size)
                      max_elem_size = current_elem_size;
                    break;
                  case AARCH64_OPND_SVE_Pd:
                  case AARCH64_OPND_SVE_Pg3:
                  case AARCH64_OPND_SVE_Pg4_5:
                  case AARCH64_OPND_SVE_Pg4_10:
                  case AARCH64_OPND_SVE_Pg4_16:
                  case AARCH64_OPND_SVE_Pm:
                  case AARCH64_OPND_SVE_Pn:
                  case AARCH64_OPND_SVE_Pt:
                    inst_pred = inst_op;
                    inst_pred_idx = i;
                    break;
                  default:
                    break;
                }
            }

          assert (max_elem_size != 0);
          aarch64_opnd_info inst_dest = inst->operands[0];
          /* Determine the size that should be used to compare against the
             movprfx size.  */
          current_elem_size
            = opcode->constraints & C_MAX_ELEM
              ? max_elem_size
              : aarch64_get_qualifier_esize (inst_dest.qualifier);

          /* If movprfx is predicated do some extra checks.  */
          if (predicated)
            {
              /* The instruction must be predicated.  */
              if (inst_pred_idx < 0)
                {
                  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
                  mismatch_detail->error = _("predicated instruction expected "
                                             "after `movprfx'");
                  mismatch_detail->index = -1;
                  mismatch_detail->non_fatal = TRUE;
                  res = ERR_VFI;
                  goto done;
                }

              /* The instruction must have a merging predicate.  */
              if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
                {
                  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
                  mismatch_detail->error = _("merging predicate expected due "
                                             "to preceding `movprfx'");
                  mismatch_detail->index = inst_pred_idx;
                  mismatch_detail->non_fatal = TRUE;
                  res = ERR_VFI;
                  goto done;
                }

              /* The same predicate register must be used in the instruction.  */
              if (blk_pred.reg.regno != inst_pred.reg.regno)
                {
                  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
                  mismatch_detail->error = _("predicate register differs "
                                             "from that in preceding "
                                             "`movprfx'");
                  mismatch_detail->index = inst_pred_idx;
                  mismatch_detail->non_fatal = TRUE;
                  res = ERR_VFI;
                  goto done;
                }
            }

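          /* For illustration, a predicated pair such as

                movprfx z0.d, p0/m, z1.d
                add     z0.d, p0/m, z0.d, z2.d

             is expected to pass the checks above: the prefixed instruction is
             predicated, uses merging predication, and names the same governing
             predicate register as the `movprfx'.  */
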
          /* Destructive operations by definition must allow one usage of the
             same register.  */
          int allowed_usage
            = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

          /* Operand is not used at all.  */
          if (num_op_used == 0)
            {
              mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
              mismatch_detail->error = _("output register of preceding "
                                         "`movprfx' not used in current "
                                         "instruction");
              mismatch_detail->index = 0;
              mismatch_detail->non_fatal = TRUE;
              res = ERR_VFI;
              goto done;
            }

          /* We now know it's used; now determine exactly where it's used.  */
          if (blk_dest.reg.regno != inst_dest.reg.regno)
            {
              mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
              mismatch_detail->error = _("output register of preceding "
                                         "`movprfx' expected as output");
              mismatch_detail->index = 0;
              mismatch_detail->non_fatal = TRUE;
              res = ERR_VFI;
              goto done;
            }

          /* Operand used more than allowed for the specific opcode type.  */
          if (num_op_used > allowed_usage)
            {
              mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
              mismatch_detail->error = _("output register of preceding "
                                         "`movprfx' used as input");
              mismatch_detail->index = last_op_usage;
              mismatch_detail->non_fatal = TRUE;
              res = ERR_VFI;
              goto done;
            }

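          /* As an illustrative example, `movprfx z0, z1' followed by a
             non-destructive prefixable instruction such as
             `abs z0.s, p0/m, z0.s' would be caught by the check above:
             z0 appears both as the destination and as a source, but only
             destructive forms may reuse the prefixed register as an input.  */
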
          /* Now the only thing left is the qualifier checks.  The register
             must have the same maximum element size.  */
          if (inst_dest.qualifier
              && blk_dest.qualifier
              && current_elem_size
                 != aarch64_get_qualifier_esize (blk_dest.qualifier))
            {
              mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
              mismatch_detail->error = _("register size not compatible with "
                                         "previous `movprfx'");
              mismatch_detail->index = 0;
              mismatch_detail->non_fatal = TRUE;
              res = ERR_VFI;
              goto done;
            }
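
          /* For instance, `movprfx z0.s, p0/m, z1.s' followed by an
             instruction whose destination is z0.d would be rejected by the
             size check above, since the element sizes differ.  */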
        }

done:
      /* Add the new instruction to the sequence.  */
      memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
              inst, sizeof (aarch64_inst));

      /* Check if sequence is now full.  */
      if (insn_sequence->next_insn >= insn_sequence->num_insns)
        {
          /* Sequence is full, but we don't have anything special to do for
             now, so clear and reset it.  */
          init_insn_sequence (NULL, insn_sequence);
        }
    }

  return res;
}

/* Return true if VALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */
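
/* For example, with ESIZE == 2 a value such as 0x00ff cannot be created with
   DUP at any element size (the DUP immediate is a signed 8-bit value,
   optionally shifted left by 8), so the function below returns TRUE and the
   value has to be materialised with DUPM's bitmask immediate instead.  */
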
bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
        {
          svalue = (int16_t) uvalue;
          if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
            return FALSE;
        }
    }
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  return svalue < -128 || svalue >= 128;
}

/* Include the opcode description table as well as the operand description
   table.  */
#define VERIFIER(x) verify_##x
#include "aarch64-tbl.h"
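
/* Note: VERIFIER is expected to be used by the opcode table entries included
   above to name the verify_* routines defined earlier in this file.  */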